/*
 * XLP8XX/4XX/3XX/2XX:
 * The MSI-X interrupt handling is different from MSI. There are 32 MSI-X
 * interrupts generated by the PIC, and each of these corresponds to an
 * MSI-X vector (0-31) that can be assigned.
 *
 * We divide the MSI-X vectors into 8 per link and do a per-link allocation.
 *
 * XLP9XX:
 * 32 MSI-X vectors are available per link, and the interrupts are not
 * routed through the PIC. PIC ack is not needed.
 *
 * Enable and disable are done using the standard MSI functions.
 */
static void xlp_msix_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md;
	int link, msixvec;
	uint32_t status_reg, bit;

	msixvec = nlm_irq_msixvec(d->irq);
	link = nlm_irq_msixlink(msixvec);
	mask_msi_irq(d);
	md = irq_data_get_irq_handler_data(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx()) {
		status_reg = PCIE_9XX_MSIX_STATUSX(link);
		bit = msixvec % XLP_MSIXVEC_PER_LINK;
	} else {
		status_reg = PCIE_MSIX_STATUS;
		bit = msixvec;
	}
	nlm_write_reg(md->lnkbase, status_reg, 1u << bit);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
	if (!cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_IRT_PCIE_MSIX_INDEX(msixvec));
}
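/*
 * The per-link split described above implies a simple arithmetic mapping
 * between a Linux irq number, its MSI-X vector (0-31), and its link (0-3).
 * A minimal sketch of such helpers follows, assuming a contiguous MSI-X
 * irq range starting at a hypothetical XLP_MSIX_IRQ_BASE; the real
 * nlm_irq_msixvec()/nlm_irq_msixlink() used above may differ in detail.
 */
#define XLP_MSIXVEC_PER_LINK	8
#define XLP_MSIX_IRQ_BASE	96	/* hypothetical start of MSI-X irq range */

static inline int sketch_irq_msixvec(int irq)
{
	return irq - XLP_MSIX_IRQ_BASE;			/* vector 0..31 */
}

static inline int sketch_irq_msixlink(int msixvec)
{
	return msixvec / XLP_MSIXVEC_PER_LINK;		/* link 0..3 */
}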
static void xlp_msi_disable(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	unsigned long flags;
	int vec;

	vec = nlm_irq_msivec(d->irq);
	spin_lock_irqsave(&md->msi_lock, flags);
	md->msi_enabled_mask &= ~(1u << vec);
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
				md->msi_enabled_mask);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_EN,
				md->msi_enabled_mask);
	spin_unlock_irqrestore(&md->msi_lock, flags);
}
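/*
 * The enable path is presumably the mirror image: set the vector's bit in
 * msi_enabled_mask under the same lock and write the mask back. A minimal
 * sketch, assuming the same register layout as xlp_msi_disable() above:
 */
static void xlp_msi_enable(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	unsigned long flags;
	int vec;

	vec = nlm_irq_msivec(d->irq);
	spin_lock_irqsave(&md->msi_lock, flags);
	md->msi_enabled_mask |= 1u << vec;
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
				md->msi_enabled_mask);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_EN,
				md->msi_enabled_mask);
	spin_unlock_irqrestore(&md->msi_lock, flags);
}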
/*
 * Switch a link to MSI-X mode
 */
static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
{
	u32 val;

	val = nlm_read_reg(lnkbase, 0x2C);
	if ((val & 0x80000000U) == 0) {
		val |= 0x80000000U;
		nlm_write_reg(lnkbase, 0x2C, val);
	}

	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	if (cpu_is_xlp9xx()) {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	} else {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	}
}
static void nlm_linux_exit(void)
{
	uint64_t gpiobase;

	gpiobase = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
	/* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */
	nlm_write_reg(gpiobase, GPIO_SWRESET_REG, 1);
	for ( ; ; )
		cpu_wait();
}
/*
 * Set up a PCIe link for MSI. By default, the links are in
 * legacy interrupt mode. We switch them to MSI mode
 * at the first MSI request.
 */
static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
{
	u32 val;

	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;	/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* MSI addr */
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);

	/* MSI cap for bridge */
	val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
	if ((val & (1 << 16)) == 0) {
		val |= 0xb << 16;	/* mmc32, msi enable */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
	}
}
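/*
 * A sketch of the "switch at the first MSI request" behavior described
 * above: vectors are handed out lazily, and the link leaves legacy mode
 * only when its first vector is allocated. msi_alloc_mask is an assumed
 * allocator field, not one visible in this file, and the real setup path
 * is more involved.
 */
static int sketch_msi_alloc_vec(struct xlp_msi_data *md, int lirq,
				uint64_t msiaddr)
{
	unsigned long flags;
	int vec;

	spin_lock_irqsave(&md->msi_lock, flags);
	vec = ffz(md->msi_alloc_mask);		/* lowest free vector */
	if (vec >= 32) {
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return -ENOSPC;			/* all 32 vectors in use */
	}
	if (md->msi_alloc_mask == 0)		/* first user of this link */
		xlp_config_link_msi(md->lnkbase, lirq, msiaddr);
	md->msi_alloc_mask |= 1u << vec;
	spin_unlock_irqrestore(&md->msi_lock, flags);
	return vec;
}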
void prom_putchar(char c)
{
	uint64_t uartbase;

#if defined(CONFIG_CPU_XLP)
	uartbase = nlm_get_uart_regbase(0, 0);
#elif defined(CONFIG_CPU_XLR)
	uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
#endif
	while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
		;
	nlm_write_reg(uartbase, UART_TX, c);
}
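/*
 * Usage sketch: early console output goes through prom_putchar() one byte
 * at a time. A trivial string helper (not part of the original file) might
 * look like this; the CR-before-LF translation is a common serial-console
 * convention, assumed here:
 */
static void prom_putstr(const char *s)
{
	while (*s != '\0') {
		if (*s == '\n')
			prom_putchar('\r');
		prom_putchar(*s++);
	}
}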
static void xlp_msi_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	int link, vec;

	link = nlm_irq_msilink(d->irq);
	vec = nlm_irq_msivec(d->irq);
	xlp_msi_disable(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
	if (cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_9XX_IRT_PCIE_LINK_INDEX(link));
	else
		nlm_pic_ack(md->node->picbase,
				PIC_IRT_PCIE_LINK_INDEX(link));
}
void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
{
	uint64_t uartbase;

	/* sign extend to 64 bits, if needed */
	uartbase = (uint64_t)(long)p->membase;

	/* See XLR/XLS errata */
	if (offset == UART_MSR)
		value ^= 0xF0;
	else if (offset == UART_MCR)
		value ^= 0x3;

	nlm_write_reg(uartbase, offset, value);
}
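/*
 * The corresponding read accessor presumably applies the same XOR so that
 * the bits inverted by the XLR/XLS errata read back correctly. A sketch
 * under that assumption:
 */
unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
{
	uint64_t uartbase;
	unsigned int value;

	/* sign extend to 64 bits, if needed */
	uartbase = (uint64_t)(long)p->membase;
	value = nlm_read_reg(uartbase, offset);

	/* See XLR/XLS errata */
	if (offset == UART_MSR)
		value ^= 0xF0;
	else if (offset == UART_MCR)
		value ^= 0x3;

	return value;
}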
static void
xlp_add_soc_child(device_t pcib, device_t dev, int b, int s, int f)
{
	struct pci_devinfo *dinfo;
	struct xlp_devinfo *xlp_dinfo;
	struct soc_dev_desc *si;
	uint64_t pcibase;
	int domain, node, irt, irq, flags, devoffset, num;
	uint16_t devid;

	domain = pcib_get_domain(dev);
	node = s / 8;
	devoffset = XLP_HDR_OFFSET(node, 0, s % 8, f);
	if (!nlm_dev_exists(devoffset))
		return;

	/* Find if there is a desc for the SoC device */
	devid = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_DEVICE, 2);
	si = xlp_find_soc_desc(devid);

	/* update flags and irq from desc if available */
	irq = 0;
	flags = 0;
	if (si != NULL) {
		if (si->irqbase != 0)
			irq = si->irqbase + si->ndevs;
		flags = si->flags;
		si->ndevs++;
	}

	/* skip internal devices */
	if ((flags & INTERNAL_DEV) != 0)
		return;

	/* PCIe interfaces are special, bug in Ax */
	if (devid == PCI_DEVICE_ID_NLM_PCIE) {
		xlp_add_irq(node, xlp_pcie_link_irt(f), PIC_PCIE_0_IRQ + f);
	} else {
		/* Stash intline and pin in shadow reg for devices */
		pcibase = nlm_pcicfg_base(devoffset);
		irt = nlm_irtstart(pcibase);
		num = nlm_irtnum(pcibase);
		if (irq != 0 && num > 0) {
			xlp_add_irq(node, irt, irq);
			nlm_write_reg(pcibase, XLP_PCI_DEVSCRATCH_REG0,
			    (1 << 8) | irq);
		}
	}
	dinfo = pci_read_device(pcib, domain, b, s, f, sizeof(*xlp_dinfo));
	if (dinfo == NULL)
		return;
	xlp_dinfo = (struct xlp_devinfo *)dinfo;
	xlp_dinfo->irq = irq;
	xlp_dinfo->flags = flags;

	/* memory resource from ecfg space, if MEM_RES_EMUL is set */
	if ((flags & MEM_RES_EMUL) != 0)
		xlp_dinfo->mem_res_start = XLP_DEFAULT_IO_BASE + devoffset +
		    XLP_IO_PCI_HDRSZ;
	pci_add_child(dev, dinfo);
}
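/*
 * Sketch of the descriptor lookup used above, reconstructed from how the
 * fields are consumed in xlp_add_soc_child(). The struct layout, the table
 * name, and the sample entry are assumptions; only irqbase, flags, and
 * ndevs are visible in this file.
 */
struct soc_dev_desc {
	uint16_t	devid;		/* PCI device id of the SoC block */
	int		irqbase;	/* first irq of a fixed range, 0 if none */
	int		flags;		/* e.g. INTERNAL_DEV, MEM_RES_EMUL */
	int		ndevs;		/* instances seen so far during the scan */
};

static struct soc_dev_desc xlp_descs[] = {
	{ 0x1010 /* hypothetical devid */, 17, 0, 0 },
};

static struct soc_dev_desc *
xlp_find_soc_desc(uint16_t devid)
{
	u_int i;

	for (i = 0; i < nitems(xlp_descs); i++)
		if (xlp_descs[i].devid == devid)
			return (&xlp_descs[i]);
	return (NULL);
}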
static void nlm_xlp_uart_out(struct uart_port *p, int offset, int value)
{
	nlm_write_reg(p->iobase, offset, value);
}
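/*
 * The matching read accessor would presumably be the trivial inverse,
 * since the XLP parts need no errata fixups (unlike XLR/XLS above).
 * A minimal sketch:
 */
static unsigned int nlm_xlp_uart_in(struct uart_port *p, int offset)
{
	return nlm_read_reg(p->iobase, offset);
}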