Example #1
int __cpuinit xlr_wakeup_secondary_cpus(void)
{
	struct nlm_soc_info *nodep;
	unsigned int i, j, boot_cpu;
	volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);

	/*
	 * In case of RMI boot, hit the cores with an NMI to bring them
	 * out of the bootloader and into Linux code.
	 */
	nodep = nlm_get_node(0);
	boot_cpu = hard_smp_processor_id();
	nlm_set_nmi_handler(nlm_rmiboot_preboot);
	for (i = 0; i < NR_CPUS; i++) {
		if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
			continue;
		nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
	}

	/* Fill up the coremask early */
	nodep->coremask = 1;
	for (i = 1; i < NLM_CORES_PER_NODE; i++) {
		for (j = 1000000; j > 0; j--) {
			if (cpu_ready[i * NLM_THREADS_PER_CORE])
				break;
			udelay(10);
		}
		if (j != 0)
			nodep->coremask |= (1u << i);
		else
			pr_err("Failed to wakeup core %d\n", i);
	}

	return 0;
}
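Example #1's wake-up relies on a simple shared-memory handshake: the boot CPU polls cpu_ready[] while each secondary thread sets its own slot once it reaches Linux code. The secondary side is not shown in this listing; below is a hedged sketch of it, where the function name and the hwtid parameter are hypothetical and only the BOOT_CPU_READY/cpu_ready[] convention is taken from the code above.

/*
 * Hedged sketch: the secondary side of the handshake polled in
 * Example #1. The function name and call site are hypothetical;
 * only the BOOT_CPU_READY/cpu_ready[] convention comes from the
 * example above.
 */
static void nlm_mark_cpu_ready(int hwtid)
{
	volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);

	cpu_ready[hwtid] = 1;	/* observed by the boot CPU's polling loop */
}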
Example #2
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase;
	uint32_t syscoremask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		syspcibase = nlm_get_sys_pcibase(n);
		if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
			break;

		/* read cores in reset from SYS */
		if (n != 0)
			nlm_node_init(n);
		nodep = nlm_get_node(n);
		syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
		/* The boot cpu */
		if (n == 0) {
			syscoremask |= 1;
			nodep->coremask = 1;
		}

		for (core = 0; core < NLM_CORES_PER_NODE; core++) {
			/* we will be on node 0 core 0 */
			if (n == 0 && core == 0)
				continue;

			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first hw thread is enabled */
			cpu = (n * NLM_CORES_PER_NODE + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (!xlp_wakeup_core(nodep->sysbase, n, core))
				continue;

			/* core is up */
			nodep->coremask |= 1u << core;

			/* spin until the hw threads set their ready flags */
			wait_for_cpus(cpu, 0);
		}
	}
}
Example #3
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
	int cpu, node;
	uint64_t picbase;

	cpu = cpu_logical_map(logical_cpu);
	node = cpu / NLM_CPUS_PER_NODE;
	picbase = nlm_get_node(node)->picbase;

	if (action & SMP_CALL_FUNCTION)
		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
	if (action & SMP_RESCHEDULE_YOURSELF)
		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
}
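nlm_send_ipi_single targets one logical CPU; SMP code typically also needs a mask-broadcast variant. A minimal companion sketch follows, assuming the kernel's standard for_each_cpu iterator; the name nlm_send_ipi_mask is chosen here to mirror the single-CPU routine and is not confirmed by this listing.

/*
 * Hedged sketch: fan a single IPI action out to every CPU in a mask
 * by delegating to nlm_send_ipi_single() above.
 */
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int cpu;

	for_each_cpu(cpu, mask)
		nlm_send_ipi_single(cpu, action);
}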
Example #4
/*
 * Allocate an MSI vector on a link
 */
static int xlp_setup_msi(uint64_t lnkbase, int node, int link,
	struct msi_desc *desc)
{
	struct xlp_msi_data *md;
	struct msi_msg msg;
	unsigned long flags;
	int msivec, irt, lirq, xirq, ret;
	uint64_t msiaddr;

	/* Get MSI data for the link */
	lirq = PIC_PCIE_LINK_MSI_IRQ(link);
	xirq = nlm_irq_to_xirq(node, nlm_link_msiirq(link, 0));
	md = irq_get_handler_data(xirq);
	msiaddr = MSI_LINK_ADDR(node, link);

	spin_lock_irqsave(&md->msi_lock, flags);
	if (md->msi_alloc_mask == 0) {
		xlp_config_link_msi(lnkbase, lirq, msiaddr);
		/* switch the link IRQ to MSI range */
		if (cpu_is_xlp9xx())
			irt = PIC_9XX_IRT_PCIE_LINK_INDEX(link);
		else
			irt = PIC_IRT_PCIE_LINK_INDEX(link);
		nlm_setup_pic_irq(node, lirq, lirq, irt);
		nlm_pic_init_irt(nlm_get_node(node)->picbase, irt, lirq,
				 node * nlm_threads_per_node(), 1 /* enable */);
	}

	/* allocate an MSI vector, and tell the bridge about it */
	msivec = fls(md->msi_alloc_mask);
	if (msivec == XLP_MSIVEC_PER_LINK) {
		spin_unlock_irqrestore(&md->msi_lock, flags);
		return -ENOMEM;
	}
	md->msi_alloc_mask |= (1u << msivec);
	spin_unlock_irqrestore(&md->msi_lock, flags);

	msg.address_hi = msiaddr >> 32;
	msg.address_lo = msiaddr & 0xffffffff;
	msg.data = 0xc00 | msivec;

	xirq = xirq + msivec;		/* msi mapped to global irq space */
	ret = irq_set_msi_desc(xirq, desc);
	if (ret < 0)
		return ret;

	write_msi_msg(xirq, &msg);
	return 0;
}
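One subtlety in xlp_setup_msi is the allocator: msivec = fls(md->msi_alloc_mask) yields the next free vector only because vectors are handed out in order, so the mask always has the form (1 << n) - 1. The standalone sketch below demonstrates that invariant; fls32() is a portable stand-in for the kernel's fls().

#include <stdio.h>

/* stand-in for the kernel's fls(): 1-based index of the highest set
 * bit, with fls32(0) == 0 */
static int fls32(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	unsigned int alloc_mask = 0;
	int i, vec;

	/* vectors are allocated sequentially, so the mask stays of the
	 * form (1 << n) - 1 and fls() of it is the next free vector */
	for (i = 0; i < 4; i++) {
		vec = fls32(alloc_mask);
		alloc_mask |= 1u << vec;
		printf("allocated vector %d, mask now 0x%x\n", vec, alloc_mask);
	}
	return 0;
}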
Example #5
static void nlm_init_pic_timer(void)
{
	uint64_t picbase = nlm_get_node(0)->picbase;
	u32 picfreq;

	nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
	if (current_cpu_data.cputype == CPU_XLR) {
		csrc_pic.mask	= CLOCKSOURCE_MASK(32);
		csrc_pic.read	= nlm_get_pic_timer32;
	} else {
		csrc_pic.mask	= CLOCKSOURCE_MASK(64);
		csrc_pic.read	= nlm_get_pic_timer;
	}
	csrc_pic.rating = 1000;
	picfreq = pic_timer_freq();
	clocksource_register_hz(&csrc_pic, picfreq);
	pr_info("PIC clock source added, frequency %d\n", picfreq);
}
Example #6
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase;
	uint32_t syscoremask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		syspcibase = nlm_get_sys_pcibase(n);
		if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
			break;

		/* read cores in reset from SYS and account for boot cpu */
		nlm_node_init(n);
		nodep = nlm_get_node(n);
		syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
		if (n == 0)
			syscoremask |= 1;

		for (core = 0; core < NLM_CORES_PER_NODE; core++) {
			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first thread is enabled */
			cpu = (n * NLM_CORES_PER_NODE + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (xlp_wakeup_core(nodep->sysbase, core))
				nodep->coremask |= 1u << core;
			else
				pr_err("Failed to enable core %d\n", core);
		}
	}
}
Example #7
static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
{
	uint64_t picbase = nlm_get_node(0)->picbase;

	return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
}
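Example #5 installs a 64-bit read hook, nlm_get_pic_timer, for non-XLR parts; its body does not appear in this listing. A minimal sketch follows, assuming a 64-bit accessor named nlm_pic_read_timer by analogy with the 32-bit one above. The bitwise NOT in both readers is consistent with a down-counting PIC timer being presented as the monotonically increasing value a clocksource expects.

/*
 * Hedged sketch of the 64-bit read hook referenced in Example #5.
 * Assumes an accessor nlm_pic_read_timer(), named by analogy with
 * nlm_pic_read_timer32(). The complement turns the down-counting
 * PIC timer into an up-counting clocksource value.
 */
static cycle_t nlm_get_pic_timer(struct clocksource *cs)
{
	uint64_t picbase = nlm_get_node(0)->picbase;

	return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
}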
Example #8
static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
{
	struct nlm_soc_info *nodep;
	uint64_t syspcibase, fusebase;
	uint32_t syscoremask, mask, fusemask;
	int core, n, cpu;

	for (n = 0; n < NLM_NR_NODES; n++) {
		if (n != 0) {
			/* check if node exists and is online */
			if (cpu_is_xlp9xx()) {
				int b = xlp9xx_get_socbus(n);
				pr_info("Node %d SoC PCI bus %d.\n", n, b);
				if (b == 0)
					break;
			} else {
				syspcibase = nlm_get_sys_pcibase(n);
				if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
					break;
			}
			nlm_node_init(n);
		}

		/* read cores in reset from SYS */
		nodep = nlm_get_node(n);

		if (cpu_is_xlp9xx()) {
			fusebase = nlm_get_fuse_regbase(n);
			fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP5XX:
				mask = 0xff;
				break;
			case PRID_IMP_NETLOGIC_XLP9XX:
			default:
				mask = 0xfffff;
				break;
			}
		} else {
			fusemask = nlm_read_sys_reg(nodep->sysbase,
						SYS_EFUSE_DEVICE_CFG_STATUS0);
			switch (read_c0_prid() & PRID_IMP_MASK) {
			case PRID_IMP_NETLOGIC_XLP3XX:
				mask = 0xf;
				break;
			case PRID_IMP_NETLOGIC_XLP2XX:
				mask = 0x3;
				break;
			case PRID_IMP_NETLOGIC_XLP8XX:
			default:
				mask = 0xff;
				break;
			}
		}

		/*
		 * Fused-out cores have their bits set in fusemask, and the
		 * remaining cores are renumbered to the range 0 .. nactive-1
		 */
		syscoremask = (1 << hweight32(~fusemask & mask)) - 1;

		pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
		for (core = 0; core < nlm_cores_per_node(); core++) {
			/* we will be on node 0 core 0 */
			if (n == 0 && core == 0)
				continue;

			/* see if the core exists */
			if ((syscoremask & (1 << core)) == 0)
				continue;

			/* see if at least the first hw thread is enabled */
			cpu = (n * nlm_cores_per_node() + core)
						* NLM_THREADS_PER_CORE;
			if (!cpumask_test_cpu(cpu, wakeup_mask))
				continue;

			/* wake up the core */
			if (!xlp_wakeup_core(nodep->sysbase, n, core))
				continue;

			/* core is up */
			nodep->coremask |= 1u << core;

			/* spin until the hw threads set their ready flags */
			if (!wait_for_cpus(cpu, 0))
				pr_err("Node %d : timeout core %d\n", n, core);
		}
	}
}
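The fuse-mask arithmetic in Example #8 is easy to misread: disabled cores have their bits set in fusemask, and the surviving cores are renumbered contiguously from 0, so the usable mask is just the low nactive bits. A standalone illustration follows, with a made-up fuse value (the 0x0c and the 8-core 0xff part mask are hypothetical).

#include <stdio.h>

/* stand-in for the kernel's hweight32() population count */
static unsigned int popcount32(unsigned int x)
{
	return (unsigned int)__builtin_popcount(x);
}

int main(void)
{
	unsigned int fusemask = 0x0c;	/* hypothetical: cores 2 and 3 fused out */
	unsigned int mask = 0xff;	/* hypothetical 8-core part mask */
	unsigned int nactive = popcount32(~fusemask & mask);
	unsigned int syscoremask = (1u << nactive) - 1;

	/* prints: active cores = 6, coremask = 0x3f */
	printf("active cores = %u, coremask = 0x%x\n", nactive, syscoremask);
	return 0;
}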