/*
 * Switch a link to MSI-X mode
 *
 * Enables MSI-X delivery on one PCIe link bridge: sets the mode bit in
 * bridge reg 0x2C, enables the MSI interrupt in INT_EN0 (9XX or classic
 * register, depending on chip), sets bit 0x400 in the PCI command reg,
 * programs the link IRQ, and (on 9XX) the MSI-X address window.
 *
 * @lnkbase:  register base of the PCIe link bridge
 * @lirq:     CPU irq to route the link's MSI-X interrupts to
 * @msixaddr: base of the MSI-X address window for this link
 */
static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
{
	u32 val;

	/*
	 * Bit 31 of bridge reg 0x2C — presumably selects MSI-X mode for
	 * the bridge; confirm against the XLP PCIe bridge documentation.
	 */
	val = nlm_read_reg(lnkbase, 0x2C);
	if ((val & 0x80000000U) == 0) {
		val |= 0x80000000U;
		nlm_write_reg(lnkbase, 0x2C, val);
	}

	/* enable the MSI interrupt bit in the per-chip INT_EN0 register */
	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	/* set bit 0x400 in the bridge's PCI command register (reg 0x1) */
	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg: clear irq field, set lirq + bit 8 */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	if (cpu_is_xlp9xx()) {
		/* MSI-X addresses: window [msixaddr, msixaddr + MSI_ADDR_SZ),
		 * programmed in units of 256 bytes (>> 8) */
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	} else {
	/*
	 * NOTE(review): SOURCE is truncated here — the non-9XX branch
	 * (and the function's closing braces) is missing from this view.
	 */
/* * Setup a PCIe link for MSI. By default, the links are in * legacy interrupt mode. We will switch them to MSI mode * at the first MSI request. */ static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr) { u32 val; if (cpu_is_xlp9xx()) { val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0); if ((val & 0x200) == 0) { val |= 0x200; /* MSI Interrupt enable */ nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val); } } else { val = nlm_read_reg(lnkbase, PCIE_INT_EN0); if ((val & 0x200) == 0) { val |= 0x200; nlm_write_reg(lnkbase, PCIE_INT_EN0, val); } } val = nlm_read_reg(lnkbase, 0x1); /* CMD */ if ((val & 0x0400) == 0) { val |= 0x0400; nlm_write_reg(lnkbase, 0x1, val); } /* Update IRQ in the PCI irq reg */ val = nlm_read_pci_reg(lnkbase, 0xf); val &= ~0x1fu; val |= (1 << 8) | lirq; nlm_write_pci_reg(lnkbase, 0xf, val); /* MSI addr */ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32); nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff); /* MSI cap for bridge */ val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP); if ((val & (1 << 16)) == 0) { val |= 0xb << 16; /* mmc32, msi enable */ nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val); } }
void prom_putchar(char c) { uint64_t uartbase; #if defined(CONFIG_CPU_XLP) uartbase = nlm_get_uart_regbase(0, 0); #elif defined(CONFIG_CPU_XLR) uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET); #endif while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0) ; nlm_write_reg(uartbase, UART_TX, c); }
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) { struct nlm_soc_info *nodep; uint64_t syspcibase; uint32_t syscoremask; int core, n, cpu; for (n = 0; n < NLM_NR_NODES; n++) { syspcibase = nlm_get_sys_pcibase(n); if (nlm_read_reg(syspcibase, 0) == 0xffffffff) break; /* read cores in reset from SYS */ if (n != 0) nlm_node_init(n); nodep = nlm_get_node(n); syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET); /* The boot cpu */ if (n == 0) { syscoremask |= 1; nodep->coremask = 1; } for (core = 0; core < NLM_CORES_PER_NODE; core++) { /* we will be on node 0 core 0 */ if (n == 0 && core == 0) continue; /* see if the core exists */ if ((syscoremask & (1 << core)) == 0) continue; /* see if at least the first hw thread is enabled */ cpu = (n * NLM_CORES_PER_NODE + core) * NLM_THREADS_PER_CORE; if (!cpumask_test_cpu(cpu, wakeup_mask)) continue; /* wake up the core */ if (!xlp_wakeup_core(nodep->sysbase, n, core)) continue; /* core is up */ nodep->coremask |= 1u << core; /* spin until the hw threads sets their ready */ wait_for_cpus(cpu, 0); } } }
/*
 * Read a UART register on XLR, applying the XLR/XLS errata fixups:
 * the MSR and MCR registers read back with some bits flipped, so XOR
 * them back into the expected polarity.
 */
unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
{
	uint64_t base;
	unsigned int val;

	/* sign extend membase to 64 bits, if needed */
	base = (uint64_t)(long)p->membase;
	val = nlm_read_reg(base, offset);

	/* See XLR/XLS errata */
	switch (offset) {
	case UART_MSR:
		val ^= 0xF0;
		break;
	case UART_MCR:
		val ^= 0x3;
		break;
	default:
		break;
	}

	return val;
}
/*
 * Read a UART register on XLR.
 *
 * NOTE(review): this is a duplicate of the commented variant elsewhere
 * in this file; its comments had been blanked out and are restored here.
 */
unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
{
	uint64_t uartbase;
	unsigned int value;

	/* sign extend membase to 64 bits, if needed */
	uartbase = (uint64_t)(long)p->membase;
	value = nlm_read_reg(uartbase, offset);

	/* See XLR/XLS errata: MSR and MCR read back with flipped bits */
	if (offset == UART_MSR)
		value ^= 0xF0;
	else if (offset == UART_MCR)
		value ^= 0x3;

	return value;
}
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask) { struct nlm_soc_info *nodep; uint64_t syspcibase; uint32_t syscoremask; int core, n, cpu; for (n = 0; n < NLM_NR_NODES; n++) { syspcibase = nlm_get_sys_pcibase(n); if (nlm_read_reg(syspcibase, 0) == 0xffffffff) break; /* read cores in reset from SYS and account for boot cpu */ nlm_node_init(n); nodep = nlm_get_node(n); syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET); if (n == 0) syscoremask |= 1; for (core = 0; core < NLM_CORES_PER_NODE; core++) { /* see if the core exists */ if ((syscoremask & (1 << core)) == 0) continue; /* see if at least the first thread is enabled */ cpu = (n * NLM_CORES_PER_NODE + core) * NLM_THREADS_PER_CORE; if (!cpumask_test_cpu(cpu, wakeup_mask)) continue; /* wake up the core */ if (xlp_wakeup_core(nodep->sysbase, core)) nodep->coremask |= 1u << core; else pr_err("Failed to enable core %d\n", core); } } }
/*
 * Read a UART register on XLP.
 *
 * Thin wrapper: no errata fixups needed here, just a register read at
 * the port's iobase.
 */
static unsigned int nlm_xlp_uart_in(struct uart_port *p, int offset)
{
	uint64_t base = p->iobase;

	return nlm_read_reg(base, offset);
}
/*
 * Wake up the secondary cores requested in wakeup_mask.
 *
 * For each node: verify the node is present (SoC bus lookup on 9XX,
 * PCI probe otherwise), compute the active-core mask from the fuse
 * register for the detected chip variant, then wake every selected
 * core and wait for its hw threads to report ready.
 */
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase, fusebase;
	uint32_t syscoremask, mask, fusemask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		if (n != 0) {
			/* check if node exists and is online */
			if (cpu_is_xlp9xx()) {
				int b = xlp9xx_get_socbus(n);
				pr_info("Node %d SoC PCI bus %d.\n", n, b);
				/* bus 0 means the node is absent */
				if (b == 0)
					break;
			} else {
				/* all-ones PCI read means no node */
				syspcibase = nlm_get_sys_pcibase(n);
				if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
					break;
			}
			nlm_node_init(n);
		}

		/* read cores in reset from SYS */
		nodep = nlm_get_node(n);
		if (cpu_is_xlp9xx()) {
			/* 9XX family: fuse map lives in its own block;
			 * mask width depends on the chip variant (PRID) */
			fusebase = nlm_get_fuse_regbase(n);
			fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP5XX:
				mask = 0xff;		/* up to 8 cores */
				break;
			case PRID_IMP_NETLOGIC_XLP9XX:
			default:
				mask = 0xfffff;		/* up to 20 cores */
				break;
			}
		} else {
			/* older chips: fuse status is a SYS register */
			fusemask = nlm_read_sys_reg(nodep->sysbase,
						SYS_EFUSE_DEVICE_CFG_STATUS0);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP3XX:
				mask = 0xf;		/* up to 4 cores */
				break;
			case PRID_IMP_NETLOGIC_XLP2XX:
				mask = 0x3;		/* up to 2 cores */
				break;
			case PRID_IMP_NETLOGIC_XLP8XX:
			default:
				mask = 0xff;		/* up to 8 cores */
				break;
			}
		}

		/*
		 * Fused out cores are set in the fusemask, and the remaining
		 * cores are renumbered to range 0 .. nactive-1
		 */
		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);

		for (core = 0; core < nlm_cores_per_node(); core++) {
			/* we will be on node 0 core 0 */
			if (n == 0 && core == 0)
				continue;

			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first hw thread is enabled */
			cpu = (n * nlm_cores_per_node() + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (!xlp_wakeup_core(nodep->sysbase, n, core))
				continue;

			/* core is up */
			nodep->coremask |= 1u << core;

			/* spin until the hw threads sets their ready */
			if (!wait_for_cpus(cpu, 0))
				pr_err("Node %d : timeout core %d\n", n, core);
		}
	}
}