int xway_nand_probe(struct platform_device *pdev)
{
	// ltq_gpio_request(PIN_CS1, 1, 0, 1, "NAND_CS1");
	ltq_gpio_request(PIN_CLE, 1, 0, 1, "NAND_CLE");
	ltq_gpio_request(PIN_ALE, 1, 0, 1, "NAND_ALE");
	if (ltq_is_ar9() || ltq_is_vr9()) {
		ltq_gpio_request(PIN_RDY, 1, 0, 0, "NAND_BSY");
		ltq_gpio_request(PIN_RD, 1, 0, 1, "NAND_RD");
	}

	/* tell the EBU where the NAND lives and enable the region */
	ltq_ebu_w32((NAND_BASE_ADDRESS & 0x1fffff00)
		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, LTQ_EBU_ADDSEL1);

	/* setup bus timings for the NAND region */
	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
		| BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
		| BUSCON1_CMULT4, LTQ_EBU_BUSCON1);

	/* put the EBU into NAND mode on chip select 0 */
	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
		| NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
		| NAND_CON_IN_CS0 | NAND_CON_OUT_CS0, LTQ_EBU_NAND_CON);

	/* reset the chip and wait for the write cycle to complete */
	ltq_w32(NAND_WRITE_CMD_RESET,
		((u32 *)(NAND_BASE_ADDRESS | NAND_WRITE_CMD)));
	while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
		;

	return 0;
}
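/*
 * A minimal sketch of the command-cycle pattern used by the last two
 * statements of the probe above: the EBU encodes "this write is a command"
 * in the address bits of NAND_WRITE_CMD, and NAND_WAIT_WR_C reports the
 * completion of the write cycle. xway_write_cmd() is a hypothetical helper
 * for illustration; only the register names come from the code above.
 */
static void xway_write_cmd(u8 cmd)
{
	/* a write to the command-latch address issues one command cycle */
	ltq_w32(cmd, (u32 *)(NAND_BASE_ADDRESS | NAND_WRITE_CMD));
	/* spin until the EBU reports the write cycle as complete */
	while ((ltq_ebu_r32(LTQ_EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
		;
}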
static void ltq_ebu_apply(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ebu_lock, flags);
	/* drop the write protection, latch the shadow value, protect again */
	ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
	*((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
	ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
	spin_unlock_irqrestore(&ebu_lock, flags);
}
/**
 * ltq_mm_apply() - write the shadow value to the ebu address.
 * @chip: Pointer to our private data structure.
 *
 * Write the shadow value to the EBU to set the gpios. We need to take the
 * global EBU lock to make sure that PCI/MTD don't break.
 */
static void ltq_mm_apply(struct ltq_mm *chip)
{
	unsigned long flags;

	spin_lock_irqsave(&ebu_lock, flags);
	ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
	__raw_writew(chip->shadow, chip->mmchip.regs);
	ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
	spin_unlock_irqrestore(&ebu_lock, flags);
}
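/*
 * A sketch of how a gpio_chip .set() callback would drive ltq_mm_apply():
 * the latch behind the EBU is write-only, so the driver flips the bit in
 * the shadow word and rewrites the whole half-word. ltq_mm_set() is a
 * hypothetical name; only chip->shadow and ltq_mm_apply() appear above.
 */
static void ltq_mm_set(struct gpio_chip *gc, unsigned int offset, int value)
{
	struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
	struct ltq_mm *chip = container_of(mm_gc, struct ltq_mm, mmchip);

	if (value)
		chip->shadow |= BIT(offset);
	else
		chip->shadow &= ~BIT(offset);
	ltq_mm_apply(chip);
}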
/**
 * ltq_mm_save_regs() - Set initial values of GPIO pins
 * @mm_gc: pointer to memory mapped GPIO chip structure
 */
static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
{
	struct ltq_mm *chip =
		container_of(mm_gc, struct ltq_mm, mmchip);

	/* tell the ebu controller which memory address we will be using */
	ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);

	ltq_mm_apply(chip);
}
static int ltq_ebu_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res =
		platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res) {
		dev_err(&pdev->dev, "failed to get memory resource\n");
		return -ENOENT;
	}

	res = devm_request_mem_region(&pdev->dev, res->start,
		resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		return -EBUSY;
	}

	ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
		resource_size(res));
	if (!ltq_ebu_gpio_membase) {
		dev_err(&pdev->dev, "Failed to ioremap mem region\n");
		return -ENOMEM;
	}

	/* grab the default shadow value passed from the platform code */
	ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;

	/* tell the ebu controller which memory address we will be using */
	ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);

	/* write protect the region */
	ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);

	ret = gpiochip_add(&ltq_ebu_chip);
	if (!ret)
		ltq_ebu_apply();
	return ret;
}
static void ltq_hw_irqdispatch(int module)
{
	u32 irq;

	irq = ltq_icu_r32(LTQ_ICU_IM0_IOSR + (module * LTQ_ICU_OFFSET));
	if (irq == 0)
		return;

	/*
	 * silicon bug causes only the msb set to 1 to be valid. all
	 * other bits might be bogus
	 */
	irq = __fls(irq);
	do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module));

	/* if this is an EBU irq, we need to ack it or get a deadlock */
	if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0))
		ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
			LTQ_EBU_PCC_ISTAT);
}
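/*
 * A sketch of the dispatch path that would call ltq_hw_irqdispatch(): the
 * ICU modules are cascaded into the MIPS cause bits IP2..IP6, so the
 * platform handler walks the pending bits and hands the module index down.
 * MAX_IM, MIPS_CPU_TIMER_IRQ and the exact handler shape are assumptions
 * here, not taken from the code above.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	int module;

	if (pending & CAUSEF_IP7) {
		/* CPU timer interrupt */
		do_IRQ(MIPS_CPU_TIMER_IRQ);
		return;
	}

	/* external interrupts IP2..IP6 map to ICU modules 0..MAX_IM-1 */
	for (module = 0; module < MAX_IM; module++)
		if (pending & (CAUSEF_IP2 << module))
			ltq_hw_irqdispatch(module);
}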
static int __init lantiq_ebu_init(void)
{
	/* insert and request the ebu register range */
	if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
		panic("Failed to insert ebu memory");

	if (!request_mem_region(ltq_ebu_resource.start,
			resource_size(&ltq_ebu_resource), "ebu"))
		panic("Failed to request ebu memory");

	/* remap the ebu register range */
	ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
				resource_size(&ltq_ebu_resource));
	if (!ltq_ebu_membase)
		panic("Failed to remap ebu memory");

	/* make sure to unprotect the memory region where flash is located */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);

	return 0;
}
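/*
 * The ltq_ebu_w32()/ltq_ebu_r32() accessors used throughout these snippets
 * are thin mmio wrappers over the base address remapped above; a sketch
 * consistent with their use here (the real definitions live in the lantiq
 * SoC headers, with ltq_w32()/ltq_r32() wrapping __raw_writel/__raw_readl):
 */
#define ltq_ebu_w32(x, y)	ltq_w32((x), ltq_ebu_membase + (y))
#define ltq_ebu_r32(x)		ltq_r32(ltq_ebu_membase + (x))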
/* bring up all register ranges that we need for basic system control */
void __init ltq_soc_init(void)
{
	struct resource res_pmu, res_cgu, res_ebu;
	struct device_node *np_pmu =
			of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
	struct device_node *np_cgu =
			of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
	struct device_node *np_ebu =
			of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");

	/* check if all the core register ranges are available */
	if (!np_pmu || !np_cgu || !np_ebu)
		panic("Failed to load core nodes from devicetree");

	if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
			of_address_to_resource(np_cgu, 0, &res_cgu) ||
			of_address_to_resource(np_ebu, 0, &res_ebu))
		panic("Failed to get core resources");

	if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
				res_pmu.name) ||
			!request_mem_region(res_cgu.start,
				resource_size(&res_cgu), res_cgu.name) ||
			!request_mem_region(res_ebu.start,
				resource_size(&res_ebu), res_ebu.name))
		pr_err("Failed to request core resources");

	pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
	ltq_cgu_membase = ioremap_nocache(res_cgu.start,
						resource_size(&res_cgu));
	ltq_ebu_membase = ioremap_nocache(res_ebu.start,
						resource_size(&res_ebu));
	if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
		panic("Failed to remap core resources");

	/* make sure to unprotect the memory region where flash is located */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);

	/* add our generic xway clocks */
	clkdev_add_pmu("10000000.fpi", NULL, 0, PMU_FPI);
	clkdev_add_pmu("1e100400.serial", NULL, 0, PMU_ASC0);
	clkdev_add_pmu("1e100a00.gptu", NULL, 0, PMU_GPT);
	clkdev_add_pmu("1e100bb0.stp", NULL, 0, PMU_STP);
	clkdev_add_pmu("1e104100.dma", NULL, 0, PMU_DMA);
	clkdev_add_pmu("1e100800.spi", NULL, 0, PMU_SPI);
	clkdev_add_pmu("1e105300.ebu", NULL, 0, PMU_EBU);
	clkdev_add_clkout();

	/* add the soc dependent clocks */
	if (of_machine_is_compatible("lantiq,vr9")) {
		ifccr = CGU_IFCCR_VR9;
		pcicr = CGU_PCICR_VR9;
	} else {
		clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
	}

	if (!of_machine_is_compatible("lantiq,ase")) {
		clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
		clkdev_add_pci();
	}

	if (of_machine_is_compatible("lantiq,ase")) {
		if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
			clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
		else
			clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
		clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
		clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
	} else if (of_machine_is_compatible("lantiq,vr9")) {
		clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
				ltq_vr9_fpi_hz());
		clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
		clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
		clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
		clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
		clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
		clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
	} else if (of_machine_is_compatible("lantiq,ar9")) {
		clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
				ltq_ar9_fpi_hz());
		clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
	} else {
		clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
				ltq_danube_fpi_hz());
	}
}
static int ltq_pci_startup(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	const __be32 *req_mask, *bus_clk;
	u32 temp_buffer;

	/* get our clocks */
	clk_pci = clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk_pci)) {
		dev_err(&pdev->dev, "failed to get pci clock\n");
		return PTR_ERR(clk_pci);
	}

	clk_external = clk_get(&pdev->dev, "external");
	if (IS_ERR(clk_external)) {
		clk_put(clk_pci);
		dev_err(&pdev->dev, "failed to get external pci clock\n");
		return PTR_ERR(clk_external);
	}

	/* read the bus speed that we want */
	bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
	if (bus_clk)
		clk_set_rate(clk_pci, *bus_clk);

	/* and enable the clocks */
	clk_enable(clk_pci);
	if (of_find_property(node, "lantiq,external-clock", NULL))
		clk_enable(clk_external);
	else
		clk_disable(clk_external);

	/* setup reset gpio used by pci */
	reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
	if (gpio_is_valid(reset_gpio)) {
		int ret = devm_gpio_request(&pdev->dev,
						reset_gpio, "pci-reset");
		if (ret) {
			dev_err(&pdev->dev,
				"failed to request gpio %d\n", reset_gpio);
			return ret;
		}
		gpio_direction_output(reset_gpio, 1);
	}

	/* enable auto-switching between PCI and EBU */
	ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);

	/* busy, i.e. configuration is not done, PCI access has to be retried */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) & ~(1 << 24), PCI_CR_PCI_MOD);
	wmb();
	/* BUS Master/IO/MEM access */
	ltq_pci_cfg_w32(ltq_pci_cfg_r32(PCI_CS_STS_CMD) | 7, PCI_CS_STS_CMD);

	/* enable external 2 PCI masters */
	temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
	/* setup the request mask */
	req_mask = of_get_property(node, "req-mask", NULL);
	if (req_mask)
		temp_buffer &= ~((*req_mask & 0xf) << 16);
	else
		temp_buffer &= ~0xf0000;
	/* enable internal arbiter */
	temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
	/* enable internal PCI master request */
	temp_buffer &= (~(3 << PCI_MASTER0_REQ_MASK_2BITS));
	/* enable EBU request */
	temp_buffer &= (~(3 << PCI_MASTER1_REQ_MASK_2BITS));
	/* enable all external masters request */
	temp_buffer &= (~(3 << PCI_MASTER2_REQ_MASK_2BITS));
	ltq_pci_w32(temp_buffer, PCI_CR_PC_ARB);
	wmb();

	/* setup BAR memory regions */
	ltq_pci_w32(0x18000000, PCI_CR_FCI_ADDR_MAP0);
	ltq_pci_w32(0x18400000, PCI_CR_FCI_ADDR_MAP1);
	ltq_pci_w32(0x18800000, PCI_CR_FCI_ADDR_MAP2);
	ltq_pci_w32(0x18c00000, PCI_CR_FCI_ADDR_MAP3);
	ltq_pci_w32(0x19000000, PCI_CR_FCI_ADDR_MAP4);
	ltq_pci_w32(0x19400000, PCI_CR_FCI_ADDR_MAP5);
	ltq_pci_w32(0x19800000, PCI_CR_FCI_ADDR_MAP6);
	ltq_pci_w32(0x19c00000, PCI_CR_FCI_ADDR_MAP7);
	ltq_pci_w32(0x1ae00000, PCI_CR_FCI_ADDR_MAP11hg);
	ltq_pci_w32(ltq_calc_bar11mask(), PCI_CR_BAR11MASK);
	ltq_pci_w32(0, PCI_CR_PCI_ADDR_MAP11);
	ltq_pci_w32(0, PCI_CS_BASE_ADDR1);
	/* both TX and RX endian swap are enabled */
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_EOI) | 3, PCI_CR_PCI_EOI);
	wmb();
	ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR12MASK) | 0x80000000,
		PCI_CR_BAR12MASK);
	ltq_pci_w32(ltq_pci_r32(PCI_CR_BAR13MASK) | 0x80000000,
		PCI_CR_BAR13MASK);
	/* use 8 dw burst length */
	ltq_pci_w32(0x303, PCI_CR_FCI_BURST_LENGTH);
	ltq_pci_w32(ltq_pci_r32(PCI_CR_PCI_MOD) | (1 << 24), PCI_CR_PCI_MOD);
	wmb();

	/* setup irq line */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_CON) | 0xc, LTQ_EBU_PCC_CON);
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);

	/* toggle reset pin */
	if (gpio_is_valid(reset_gpio)) {
		__gpio_set_value(reset_gpio, 0);
		wmb();
		mdelay(1);
		__gpio_set_value(reset_gpio, 1);
	}
	return 0;
}
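/*
 * ltq_calc_bar11mask(), called above to program PCI_CR_BAR11MASK, sizes the
 * BAR to cover system memory. A sketch of how such a mask can be derived
 * (the real computation may differ): round installed RAM up to a power of
 * two and fold that into the inverted address mask the register expects.
 */
static u32 ltq_calc_bar11mask(void)
{
	u32 mem, bar11mask;

	/* BAR11MASK value depends on available memory on system */
	mem = get_num_physpages() * PAGE_SIZE;
	bar11mask = (0x0ffffff0 & ~((1 << (fls(mem) - 1)) - 1)) | 8;

	return bar11mask;
}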
/* bring up all register ranges that we need for basic system control */
void __init ltq_soc_init(void)
{
	struct resource res_pmu, res_cgu, res_ebu;
	struct device_node *np_pmu =
			of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
	struct device_node *np_cgu =
			of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
	struct device_node *np_ebu =
			of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");

	/* check if all the core register ranges are available */
	if (!np_pmu || !np_cgu || !np_ebu)
		panic("Failed to load core nodes from devicetree");

	if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
			of_address_to_resource(np_cgu, 0, &res_cgu) ||
			of_address_to_resource(np_ebu, 0, &res_ebu))
		panic("Failed to get core resources");

	if (!request_mem_region(res_pmu.start, resource_size(&res_pmu),
				res_pmu.name) ||
			!request_mem_region(res_cgu.start,
				resource_size(&res_cgu), res_cgu.name) ||
			!request_mem_region(res_ebu.start,
				resource_size(&res_ebu), res_ebu.name))
		pr_err("Failed to request core resources");

	pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
	ltq_cgu_membase = ioremap_nocache(res_cgu.start,
						resource_size(&res_cgu));
	ltq_ebu_membase = ioremap_nocache(res_ebu.start,
						resource_size(&res_ebu));
	if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
		panic("Failed to remap core resources");

	if (of_machine_is_compatible("lantiq,vr9")) {
		struct resource res_xbar;
		struct device_node *np_xbar =
				of_find_compatible_node(NULL, NULL,
							"lantiq,xbar-xway");

		if (!np_xbar)
			panic("Failed to load xbar nodes from devicetree");
		if (of_address_to_resource(np_xbar, 0, &res_xbar))
			panic("Failed to get xbar resources");
		if (!request_mem_region(res_xbar.start,
					resource_size(&res_xbar),
					res_xbar.name))
			panic("Failed to request xbar resources");

		ltq_xbar_membase = ioremap_nocache(res_xbar.start,
						resource_size(&res_xbar));
		if (!ltq_xbar_membase)
			panic("Failed to remap xbar resources");
	}

	/* make sure to unprotect the memory region where flash is located */
	ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);

	/* add our generic xway clocks */
	clkdev_add_pmu("10000000.fpi", NULL, 0, 0, PMU_FPI);
	clkdev_add_pmu("1e100400.serial", NULL, 0, 0, PMU_ASC0);
	clkdev_add_pmu("1e100a00.gptu", NULL, 1, 0, PMU_GPT);
	clkdev_add_pmu("1e100bb0.stp", NULL, 1, 0, PMU_STP);
	clkdev_add_pmu("1e104100.dma", NULL, 1, 0, PMU_DMA);
	clkdev_add_pmu("1e100800.spi", NULL, 1, 0, PMU_SPI);
	clkdev_add_pmu("1e105300.ebu", NULL, 0, 0, PMU_EBU);
	clkdev_add_clkout();

	/* add the soc dependent clocks */
	if (of_machine_is_compatible("lantiq,vr9")) {
		ifccr = CGU_IFCCR_VR9;
		pcicr = CGU_PCICR_VR9;
	} else {
		clkdev_add_pmu("1e180000.etop", NULL, 1, 0, PMU_PPE);
	}

	if (!of_machine_is_compatible("lantiq,ase")) {
		clkdev_add_pmu("1e100c00.serial", NULL, 0, 0, PMU_ASC1);
		clkdev_add_pci();
	}

	if (of_machine_is_compatible("lantiq,grx390") ||
	    of_machine_is_compatible("lantiq,ar10")) {
		clkdev_add_pmu("1e101000.usb", "phy", 1, 2, PMU_ANALOG_USB0_P);
		clkdev_add_pmu("1e106000.usb", "phy", 1, 2, PMU_ANALOG_USB1_P);
		/* rc 0 */
		clkdev_add_pmu("1d900000.pcie", "phy", 1, 2,
				PMU_ANALOG_PCIE0_P);
		clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
		clkdev_add_pmu("1d900000.pcie", "pdi", 1, 1, PMU1_PCIE_PDI);
		clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
		/* rc 1 */
		clkdev_add_pmu("19000000.pcie", "phy", 1, 2,
				PMU_ANALOG_PCIE1_P);
		clkdev_add_pmu("19000000.pcie", "msi", 1, 1, PMU1_PCIE1_MSI);
		clkdev_add_pmu("19000000.pcie", "pdi", 1, 1, PMU1_PCIE1_PDI);
		clkdev_add_pmu("19000000.pcie", "ctl", 1, 1, PMU1_PCIE1_CTL);
	}

	if (of_machine_is_compatible("lantiq,ase")) {
		if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
			clkdev_add_static(CLOCK_266M, CLOCK_133M,
					CLOCK_133M, CLOCK_266M);
		else
			clkdev_add_static(CLOCK_133M, CLOCK_133M,
					CLOCK_133M, CLOCK_133M);
		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P);
		clkdev_add_pmu("1e180000.etop", "ppe", 1, 0, PMU_PPE);
		clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY);
		clkdev_add_pmu("1e180000.etop", "ephy", 1, 0, PMU_EPHY);
		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_ASE_SDIO);
		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
	} else if (of_machine_is_compatible("lantiq,grx390")) {
		clkdev_add_static(ltq_grx390_cpu_hz(), ltq_grx390_fpi_hz(),
				ltq_grx390_fpi_hz(), ltq_grx390_pp32_hz());
		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1);
		/* rc 2 */
		clkdev_add_pmu("1a800000.pcie", "phy", 1, 2,
				PMU_ANALOG_PCIE2_P);
		clkdev_add_pmu("1a800000.pcie", "msi", 1, 1, PMU1_PCIE2_MSI);
		clkdev_add_pmu("1a800000.pcie", "pdi", 1, 1, PMU1_PCIE2_PDI);
		clkdev_add_pmu("1a800000.pcie", "ctl", 1, 1, PMU1_PCIE2_CTL);
		clkdev_add_pmu("1e108000.eth", NULL, 0, 0,
				PMU_SWITCH | PMU_PPE_DP);
		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
	} else if (of_machine_is_compatible("lantiq,ar10")) {
		clkdev_add_static(ltq_ar10_cpu_hz(), ltq_ar10_fpi_hz(),
				ltq_ar10_fpi_hz(), ltq_ar10_pp32_hz());
		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1);
		clkdev_add_pmu("1e108000.eth", NULL, 0, 0, PMU_SWITCH |
				PMU_PPE_DP | PMU_PPE_TC);
		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
		clkdev_add_pmu("1f203000.rcu", "gphy", 1, 0, PMU_GPHY);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
		clkdev_add_pmu("1e116000.mei", "afe", 1, 2,
				PMU_ANALOG_DSL_AFE);
		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
	} else if (of_machine_is_compatible("lantiq,vr9")) {
		clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
				ltq_vr9_fpi_hz(), ltq_vr9_pp32_hz());
		clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P);
		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0,
				PMU_USB0 | PMU_AHBM);
		clkdev_add_pmu("1e106000.usb", "phy", 1, 0, PMU_USB1_P);
		clkdev_add_pmu("1e106000.usb", "ctl", 1, 0,
				PMU_USB1 | PMU_AHBM);
		clkdev_add_pmu("1d900000.pcie", "phy", 1, 1, PMU1_PCIE_PHY);
		clkdev_add_pmu("1d900000.pcie", "bus", 1, 0, PMU_PCIE_CLK);
		clkdev_add_pmu("1d900000.pcie", "msi", 1, 1, PMU1_PCIE_MSI);
		clkdev_add_pmu("1d900000.pcie", "pdi", 1, 1, PMU1_PCIE_PDI);
		clkdev_add_pmu("1d900000.pcie", "ctl", 1, 1, PMU1_PCIE_CTL);
		clkdev_add_pmu(NULL, "ahb", 1, 0, PMU_AHBM | PMU_AHBS);
		clkdev_add_pmu("1da00000.usif", "NULL", 1, 0, PMU_USIF);
		clkdev_add_pmu("1e108000.eth", NULL, 0, 0,
				PMU_SWITCH | PMU_PPE_DPLUS | PMU_PPE_DPLUM |
				PMU_PPE_EMA | PMU_PPE_TC | PMU_PPE_SLL01 |
				PMU_PPE_QSB | PMU_PPE_TOP);
		clkdev_add_pmu("1f203000.rcu", "gphy", 0, 0, PMU_GPHY);
		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
	} else if (of_machine_is_compatible("lantiq,ar9")) {
		clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
				ltq_ar9_fpi_hz(), CLOCK_250M);
		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P);
		clkdev_add_pmu("1e106000.usb", "ctl", 1, 0, PMU_USB1);
		clkdev_add_pmu("1e106000.usb", "phy", 1, 0, PMU_USB1_P);
		clkdev_add_pmu("1e180000.etop", "switch", 1, 0, PMU_SWITCH);
		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
		clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
	} else {
		clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
				ltq_danube_fpi_hz(), ltq_danube_pp32_hz());
		clkdev_add_pmu("1e101000.usb", "ctl", 1, 0, PMU_USB0);
		clkdev_add_pmu("1e101000.usb", "phy", 1, 0, PMU_USB0_P);
		clkdev_add_pmu("1e103000.sdio", NULL, 1, 0, PMU_SDIO);
		clkdev_add_pmu("1e103100.deu", NULL, 1, 0, PMU_DEU);
		clkdev_add_pmu("1e116000.mei", "dfe", 1, 0, PMU_DFE);
		clkdev_add_pmu("1e100400.serial", NULL, 1, 0, PMU_ASC0);
	}

	if (of_machine_is_compatible("lantiq,vr9"))
		xbar_fpi_burst_disable();
}
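/*
 * Consumer side of the clkdev entries registered above, for orientation:
 * a driver bound to one of those device addresses does a plain
 * clk_get()/clk_enable(), exactly as ltq_pci_startup() does with its
 * "external" clock. The function name below is illustrative only.
 */
static int example_enable_block_clock(struct platform_device *pdev)
{
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clk_enable(clk);	/* gates the block on via its PMU bits */
	return 0;
}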
/*
 * Probe for the NAND device.
 */
static int xway_nand_probe(struct platform_device *pdev)
{
	struct xway_nand_data *data;
	struct mtd_info *mtd;
	struct resource *res;
	int err;
	u32 cs;
	u32 cs_flag = 0;

	/* Allocate memory for the device structure (and zero it) */
	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
			GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(data->nandaddr))
		return PTR_ERR(data->nandaddr);

	nand_set_flash_node(&data->chip, pdev->dev.of_node);
	mtd = nand_to_mtd(&data->chip);
	mtd->dev.parent = &pdev->dev;

	data->chip.cmd_ctrl = xway_cmd_ctrl;
	data->chip.dev_ready = xway_dev_ready;
	data->chip.select_chip = xway_select_chip;
	data->chip.write_buf = xway_write_buf;
	data->chip.read_buf = xway_read_buf;
	data->chip.read_byte = xway_read_byte;
	data->chip.chip_delay = 30;

	data->chip.ecc.mode = NAND_ECC_SOFT;
	data->chip.ecc.algo = NAND_ECC_HAMMING;

	platform_set_drvdata(pdev, data);
	nand_set_controller_data(&data->chip, data);

	/* load our CS from the DT. Either we find a valid 1 or default to 0 */
	err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
	if (!err && cs == 1)
		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;

	/* setup the EBU to run in NAND mode on our base addr */
	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
		| ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);

	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
		| BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
		| BUSCON1_CMULT4, LTQ_EBU_BUSCON1);

	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
		| NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
		| cs_flag, EBU_NAND_CON);

	/* Scan to find existence of the device */
	err = nand_scan(mtd, 1);
	if (err)
		return err;

	err = mtd_device_register(mtd, NULL, 0);
	if (err)
		nand_release(mtd);

	return err;
}
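/*
 * One of the callbacks wired up in the probe above, sketched under the
 * assumption that EBU_NAND_WAIT carries a ready/busy bit (NAND_WAIT_RD
 * here) just as LTQ_EBU_NAND_WAIT/NAND_WAIT_WR_C do in the older probe:
 */
static int xway_dev_ready(struct mtd_info *mtd)
{
	/* nonzero means the chip has released its busy line */
	return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
}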