Example #1
/*
 * XLP8XX/4XX/3XX/2XX:
 * MSI-X interrupt handling differs from MSI: the PIC generates 32 MSI-X
 * interrupts, and each of these corresponds to an MSI-X vector (0-31)
 * that can be assigned.
 *
 * We divide the MSI-X vectors into 8 per link and do a per-link
 * allocation.
 *
 * XLP9XX:
 * 32 MSI-X vectors are available per link, and the interrupts are not
 * routed through the PIC, so no PIC ack is needed.
 *
 * Enable and disable are done using the standard MSI functions.
 */
static void xlp_msix_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md;
	int link, msixvec;
	uint32_t status_reg, bit;

	msixvec = nlm_irq_msixvec(d->irq);
	link = nlm_irq_msixlink(msixvec);
	mask_msi_irq(d);
	md = irq_data_get_irq_handler_data(d);

	/* Ack MSI-X on bridge */
	if (cpu_is_xlp9xx()) {
		status_reg = PCIE_9XX_MSIX_STATUSX(link);
		bit = msixvec % XLP_MSIXVEC_PER_LINK;
	} else {
		status_reg = PCIE_MSIX_STATUS;
		bit = msixvec;
	}
	nlm_write_reg(md->lnkbase, status_reg, 1u << bit);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_MSIX_IRQ(link));
	if (!cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_IRT_PCIE_MSIX_INDEX(msixvec));
}
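The vector/link decoding helpers used above are not shown in this
listing. Based on the header comment (32 PIC-generated MSI-X interrupts,
8 vectors per link), a minimal sketch of how that split could look is
below; the sketch_* names and the irq-base parameter are hypothetical,
not the driver's actual helpers.

#define SKETCH_MSIXVEC_PER_LINK	8	/* 32 vectors across 4 links */

/* Hypothetical: recover the MSI-X vector (0-31) from the irq number,
 * assuming the MSI-X irqs occupy a contiguous range. */
static inline int sketch_irq_msixvec(int irq, int msix_irq_base)
{
	return irq - msix_irq_base;
}

/* Hypothetical: map an MSI-X vector to its PCIe link (0-3). */
static inline int sketch_irq_msixlink(int msixvec)
{
	return msixvec / SKETCH_MSIXVEC_PER_LINK;
}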
Example #2
static int __init nlm_platform_xlpii_usb_init(void)
{
	int node;

	if (!cpu_is_xlpii())
		return 0;

	if (!cpu_is_xlp9xx()) {
		/* XLP 2XX, single node */
		pr_info("Initializing 2XX USB Interface\n");
		nlm_xlpii_usb_hw_reset(0, 1);
		nlm_xlpii_usb_hw_reset(0, 2);
		nlm_xlpii_usb_hw_reset(0, 3);
		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_0_IRQ, xlp2xx_usb_ack);
		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_1_IRQ, xlp2xx_usb_ack);
		nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_2_IRQ, xlp2xx_usb_ack);
		return 0;
	}

	/* XLP 9XX, multi-node */
	pr_info("Initializing 9XX/5XX USB Interface\n");
	for (node = 0; node < NLM_NR_NODES; node++) {
		if (!nlm_node_present(node))
			continue;
		nlm_xlpii_usb_hw_reset(node, 1);
		nlm_xlpii_usb_hw_reset(node, 2);
		nlm_xlpii_usb_hw_reset(node, 3);
		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack);
		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack);
		nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_2_IRQ, xlp9xx_usb_ack);
	}
	return 0;
}
Example #3
static int __init nlm_ahci_init(void)
{
	int node;

	if (!cpu_is_xlp9xx())
		return 0;
	for (node = 0; node < NLM_NR_NODES; node++)
		if (nlm_node_present(node))
			nlm_sata_firmware_init(node);
	return 0;
}
Example #4
static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
{
	uint32_t coremask, value;
	int count, resetreg;

	coremask = (1 << core);

	/* Enable CPU clock in case of 8xx/3xx */
	if (!cpu_is_xlpii()) {
		value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
		value &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
	}

	/* On 9XX, mark coherent first */
	if (cpu_is_xlp9xx()) {
		value = nlm_read_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE);
		value &= ~coremask;
		nlm_write_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE, value);
	}

	/* Remove CPU Reset */
	resetreg = cpu_is_xlp9xx() ? SYS_9XX_CPU_RESET : SYS_CPU_RESET;
	value = nlm_read_sys_reg(sysbase, resetreg);
	value &= ~coremask;
	nlm_write_sys_reg(sysbase, resetreg, value);

	/* We are done on 9XX */
	if (cpu_is_xlp9xx())
		return 1;

	/* Poll for the CPU to mark itself coherent on other XLP types */
	count = 100000;
	do {
		value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
	} while ((value & coremask) != 0 && --count > 0);

	return count != 0;
}
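The function ends with a bounded poll: re-read a status register until
the core's bit clears or an iteration budget runs out. A hypothetical
helper that factors out this pattern (the name is illustrative, not from
the source):

/* Poll 'reg' at 'base' until 'bits' are clear or 'count' iterations
 * elapse.  Returns nonzero on success, 0 on timeout - the same
 * convention as the poll at the end of xlp_wakeup_core(). */
static int sketch_poll_bits_clear(uint64_t base, int reg, uint32_t bits,
				  int count)
{
	uint32_t value;

	do {
		value = nlm_read_sys_reg(base, reg);
	} while ((value & bits) != 0 && --count > 0);

	return count != 0;
}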
Example #5
static void xlp_msi_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	int link, vec;

	link = nlm_irq_msilink(d->irq);
	vec = nlm_irq_msivec(d->irq);
	xlp_msi_disable(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);

	/* Ack at eirr and PIC */
	ack_c0_eirr(PIC_PCIE_LINK_MSI_IRQ(link));
	if (cpu_is_xlp9xx())
		nlm_pic_ack(md->node->picbase,
				PIC_9XX_IRT_PCIE_LINK_INDEX(link));
	else
		nlm_pic_ack(md->node->picbase, PIC_IRT_PCIE_LINK_INDEX(link));
}
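The mask-ack path acknowledges at three levels, in order: the PCIe
bridge (MSI status register), the CPU (CP0 EIRR), and the PIC (whose
IRT index macro differs between XLP9XX and the earlier chips). A
condensed summary of the sequence, as comments:

/*
 * 1. xlp_msi_disable(d)          - mask the vector in the MSI enable reg
 * 2. write 1u << vec to the MSI  - clear the pending bit on the bridge
 *    status register
 * 3. ack_c0_eirr(...)            - clear the CP0 EIRR bit for the link irq
 * 4. nlm_pic_ack(...)            - ack the PIC IRT entry for the link
 */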
Example #6
/*
 * Allocate an MSI vector on a link
 */
static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
	struct msi_desc *desc)
{
	struct xlp_msi_data *md;
	struct msi_msg msg;
	unsigned long flags;
	int msivec, irt, lirq, xirq, ret;
	uint64_t msiaddr;

	/* Get MSI data for the link */
	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
	md = irq_get_handler_data(xirq);
	msiaddr = MSI_LINK_ADDR(node, link);

	spin_lock_irqsave(&md->msi_lock, flags);
	if (md->msi_alloc_mask == 0) {
		xlp_config_link_msi(lnkbase, lirq, msiaddr);
		/* switch the link IRQ to MSI range */
		if (cpu_is_xlp9xx())
			irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
		else
			irt = PIC_IRT_PCIE_LINK_INDEX(link);
		nlm_setup_pic_irq(node, lirq, lirq, irt);
		nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
				 node * nlm_threads_per_node(), 1 /*en */);
	}

	/* allocate an MSI vec, and tell the bridge about it */
	msivec = fls(md->msi_alloc_mask);
	if (msivec == XLP_MSIVEC_PER_LINK) {
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return -ENOMEM;
	}
	md->msi_alloc_mask |= (1u << msivec);
	spin_unlock_irqrestore(&md->msi_lock, flags);

	msg.address_hi = msiaddr >> 32;
	msg.address_lo = msiaddr & 0xffffffff;
	msg.data = 0xc00 | msivec;

	xirq = xirq + msivec;		/* msi mapped to global irq space */
	ret = irq_set_msi_desc(xirq, desc);
	if (ret < 0)
		return ret;

	write_msi_msg(xirq, &msg);
	return 0;
}
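The vector allocation above leans on a property of fls(): vectors are
only handed out in increasing order, so msi_alloc_mask is always a
contiguous low-bit mask, and fls() (the 1-based index of the highest set
bit, 0 for an empty mask) equals the number of vectors allocated so far,
i.e. the next free vector. A worked illustration, assuming
XLP_MSIVEC_PER_LINK is 32 to match the 32-bit mask:

/*
 * msi_alloc_mask = 0x0 -> fls() = 0 -> allocate vec 0, mask becomes 0x1
 * msi_alloc_mask = 0x1 -> fls() = 1 -> allocate vec 1, mask becomes 0x3
 * msi_alloc_mask = 0x3 -> fls() = 2 -> allocate vec 2, mask becomes 0x7
 * ...
 * all 32 bits set -> fls() = 32 = XLP_MSIVEC_PER_LINK -> -ENOMEM
 */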
Example #7
static void xlp_msi_disable(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	unsigned long flags;
	int vec;

	vec = nlm_irq_msivec(d->irq);
	spin_lock_irqsave(&md->msi_lock, flags);
	md->msi_enabled_mask &= ~(1u << vec);
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
				md->msi_enabled_mask);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
	spin_unlock_irqrestore(&md->msi_lock, flags);
}
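The enable path is symmetric: take the same lock, set the bit instead of
clearing it, and write the mask back. A hedged sketch (the driver's
actual enable function may differ in detail):

static void sketch_msi_enable(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	unsigned long flags;
	int vec;

	vec = nlm_irq_msivec(d->irq);
	spin_lock_irqsave(&md->msi_lock, flags);
	md->msi_enabled_mask |= (1u << vec);	/* set instead of clear */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_EN,
				md->msi_enabled_mask);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_EN, md->msi_enabled_mask);
	spin_unlock_irqrestore(&md->msi_lock, flags);
}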
Example #8
static void xlp_msi_mask_ack(struct irq_data *d)
{
	struct xlp_msi_data *md = irq_data_get_irq_handler_data(d);
	int link, vec;

	link = nlm_irq_msilink(d->irq);
	vec = nlm_irq_msivec(d->irq);
	xlp_msi_disable(d);

	/* Ack MSI on bridge */
	if (cpu_is_xlp9xx())
		nlm_write_reg(md->lnkbase, PCIE_9XX_MSI_STATUS, 1u << vec);
	else
		nlm_write_reg(md->lnkbase, PCIE_MSI_STATUS, 1u << vec);
}
Example #9
/*
 * Switch a link to MSI-X mode
 */
static void xlp_config_link_msix(uint64_t lnkbase, int lirq, uint64_t msixaddr)
{
	u32 val;

	val = nlm_read_reg(lnkbase, 0x2C);
	if ((val & 0x80000000U) == 0) {
		val |= 0x80000000U;
		nlm_write_reg(lnkbase, 0x2C, val);
	}

	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;		/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;		/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	if (cpu_is_xlp9xx()) {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_9XX_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	} else {
		/* MSI-X addresses */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_BASE,
				msixaddr >> 8);
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSIX_ADDR_LIMIT,
				(msixaddr + MSI_ADDR_SZ) >> 8);
	}
}
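Note the >> 8 when programming the window: the base/limit registers
appear to hold the MSI-X address in 256-byte units. A worked example,
with the address value and a 64KB MSI_ADDR_SZ assumed purely for
illustration:

/*
 * msixaddr                               = 0xfee00000
 * ADDR_BASE  = msixaddr >> 8             = 0x00fee000
 * ADDR_LIMIT = (msixaddr + 0x10000) >> 8 = 0x00fee100
 */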
Example #10
/*
 * Set up a PCIe link for MSI.  By default, the links are in
 * legacy interrupt mode.  We will switch them to MSI mode
 * at the first MSI request.
 */
static void xlp_config_link_msi(uint64_t lnkbase, int lirq, uint64_t msiaddr)
{
	u32 val;

	if (cpu_is_xlp9xx()) {
		val = nlm_read_reg(lnkbase, PCIE_9XX_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;		/* MSI Interrupt enable */
			nlm_write_reg(lnkbase, PCIE_9XX_INT_EN0, val);
		}
	} else {
		val = nlm_read_reg(lnkbase, PCIE_INT_EN0);
		if ((val & 0x200) == 0) {
			val |= 0x200;
			nlm_write_reg(lnkbase, PCIE_INT_EN0, val);
		}
	}

	val = nlm_read_reg(lnkbase, 0x1);	/* CMD */
	if ((val & 0x0400) == 0) {
		val |= 0x0400;
		nlm_write_reg(lnkbase, 0x1, val);
	}

	/* Update IRQ in the PCI irq reg */
	val = nlm_read_pci_reg(lnkbase, 0xf);
	val &= ~0x1fu;
	val |= (1 << 8) | lirq;
	nlm_write_pci_reg(lnkbase, 0xf, val);

	/* MSI addr */
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRH, msiaddr >> 32);
	nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_ADDRL, msiaddr & 0xffffffff);

	/* MSI cap for bridge */
	val = nlm_read_reg(lnkbase, PCIE_BRIDGE_MSI_CAP);
	if ((val & (1 << 16)) == 0) {
		val |= 0xb << 16;		/* mmc32, msi enable */
		nlm_write_reg(lnkbase, PCIE_BRIDGE_MSI_CAP, val);
	}
}
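Both xlp_config_link_msi() and xlp_config_link_msix() repeat the same
read-modify-write idiom: read a register and write it back only if the
wanted bits are not already set, avoiding redundant writes to the
bridge. A hypothetical helper capturing the idiom (not present in the
source):

/* Set 'bits' in 'reg' on the link bridge, skipping the write if they
 * are already set. */
static void sketch_set_link_bits(uint64_t lnkbase, int reg, u32 bits)
{
	u32 val;

	val = nlm_read_reg(lnkbase, reg);
	if ((val & bits) != bits) {
		val |= bits;
		nlm_write_reg(lnkbase, reg, val);
	}
}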
Example #11
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase, fusebase;
	uint32_t syscoremask, mask, fusemask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		if (n != 0) {
			/* check if node exists and is online */
			if (cpu_is_xlp9xx()) {
				int b = xlp9xx_get_socbus(n);
				pr_info("Node %d SoC PCI bus %d.\n", n, b);
				if (b == 0)
					break;
			} else {
				syspcibase = nlm_get_sys_pcibase(n);
				if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
					break;
			}
			nlm_node_init(n);
		}

		/* read cores in reset from SYS */
		nodep = nlm_get_node(n);

		if (cpu_is_xlp9xx()) {
			fusebase = nlm_get_fuse_regbase(n);
			fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP5XX:
				mask = 0xff;
				break;
			case PRID_IMP_NETLOGIC_XLP9XX:
			default:
				mask = 0xfffff;
				break;
			}
		} else {
			fusemask = nlm_read_sys_reg(nodep->sysbase,
						SYS_EFUSE_DEVICE_CFG_STATUS0);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP3XX:
				mask = 0xf;
				break;
			case PRID_IMP_NETLOGIC_XLP2XX:
				mask = 0x3;
				break;
			case PRID_IMP_NETLOGIC_XLP8XX:
			default:
				mask = 0xff;
				break;
			}
		}

		/*
	 * Fused-out cores are set in the fusemask, and the remaining
	 * cores are renumbered to the range 0 .. nactive-1.
		 */
		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;

		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
		for (core = 0; core < nlm_cores_per_node(); core++) {
			/* we will be on node 0 core 0 */
			if (n == 0 && core == 0)
				continue;

			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first hw thread is enabled */
			cpu = (n * nlm_cores_per_node() + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (!xlp_wakeup_core(nodep->sysbase, n, core))
				continue;

			/* core is up */
			nodep->coremask |= 1u << core;

			/* spin until the hw threads set their ready flag */
			if (!wait_for_cpus(cpu, 0))
				pr_err("Node %d : timeout core %d\n", n, core);
		}
	}
}
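The coremask derivation in the loop above deserves a worked example.
With an 8-core part (mask = 0xff) and, hypothetically, cores 3 and 5
fused out (fusemask = 0x28):

/*
 * ~fusemask & mask           = 0xd7	(6 usable cores)
 * hweight32(0xd7)            = 6
 * syscoremask = (1 << 6) - 1 = 0x3f
 *
 * i.e. the six surviving cores are renumbered 0..5, matching the
 * "renumbered to the range 0 .. nactive-1" comment above.
 */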