Beispiel #1
0
static void v2m_timer_init(void)
{
	writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
	writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);

	sp804_clocksource_init(MMIO_P2V(V2M_TIMER1));
	sp804_clockevents_init(MMIO_P2V(V2M_TIMER0), IRQ_V2M_TIMER0);
}
/*
 * Disable the CA9x4 core tile SP804 timers, then register TIMER1 as the
 * clocksource and TIMER0 (with its interrupt) as the clockevent device.
 */
static void __init ct_ca9x4_timer_init(void)
{
	void __iomem *timer0 = MMIO_P2V(CT_CA9X4_TIMER0);
	void __iomem *timer1 = MMIO_P2V(CT_CA9X4_TIMER1);

	/* Stop both timers before the timekeeping code takes them over. */
	writel(0, timer0 + TIMER_CTRL);
	writel(0, timer1 + TIMER_CTRL);

	sp804_clocksource_init(timer1);
	sp804_clockevents_init(timer0, IRQ_CT_CA9X4_TIMER0);
}
/*
 * Install the static I/O mappings for the CA9x4 core tile.  When
 * per-CPU local (TWD) timers are configured, also record their MMIO
 * base so the TWD code can find it later.
 */
static void __init ct_ca9x4_map_io(void)
{
#ifdef CONFIG_LOCAL_TIMERS
	/* MMIO_P2V is static address arithmetic, so this is safe to set
	 * before iotable_init() runs. */
	twd_base = MMIO_P2V(A9_MPCORE_TWD);
#endif
	iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
static void ct_ca9x4_init_cpu_map(void)
{
	int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));

	for (i = 0; i < ncores; ++i)
		set_cpu_possible(i, true);
}
/*
 * Mark the first max_cpus CPUs as present, then switch on the Snoop
 * Control Unit so the secondary cores join the coherency domain.
 */
static void ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	unsigned int cpu;

	for (cpu = 0; cpu < max_cpus; cpu++)
		set_cpu_present(cpu, true);

	scu_enable(MMIO_P2V(A9_MPCORE_SCU));
}
Beispiel #6
0
/*
 * Prepare the platform for SMP boot.  The core tile descriptor marks
 * the present CPUs, then the physical address of the secondary startup
 * trampoline is published through the system flags register: the boot
 * monitor parks the secondaries until a soft interrupt arrives, at
 * which point they branch to the address written here.
 */
void __init platform_smp_prepare_cpus(unsigned int max_cpus)
{
	/* Per-tile hook: initialise the cpu_present map. */
	ct_desc->smp_enable(max_cpus);

	/* Clear all flag bits before setting the startup address. */
	writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
	writel(BSYM(virt_to_phys(versatile_secondary_startup)),
	       MMIO_P2V(V2M_SYS_FLAGSSET));
}
Beispiel #7
0
/*
 * Issue a read transaction on the vexpress configuration bus and
 * busy-wait for completion.  On return *data holds the value read.
 * Returns nonzero if the controller flagged an error, zero on success.
 */
int v2m_cfg_read(u32 devfn, u32 *data)
{
	u32 status;

	devfn |= SYS_CFG_START;

	spin_lock(&v2m_cfg_lock);
	writel(0, MMIO_P2V(V2M_SYS_CFGSTAT));
	writel(devfn, MMIO_P2V(V2M_SYS_CFGCTRL));

	mb();

	/* Poll until the controller reports completion (or an error). */
	for (;;) {
		cpu_relax();
		status = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
		if (status != 0)
			break;
	}

	*data = readl(MMIO_P2V(V2M_SYS_CFGDATA));
	spin_unlock(&v2m_cfg_lock);

	return !!(status & SYS_CFG_ERR);
}
Beispiel #8
0
/*
 * Issue a write transaction on the vexpress configuration bus and
 * busy-wait for completion.  Returns nonzero if the controller flagged
 * an error, zero on success.
 */
int v2m_cfg_write(u32 devfn, u32 data)
{
	u32 val;

	/* Was a bare printk() with no log level — demote to debug. */
	pr_debug("%s: writing %08x to %08x\n", __func__, data, devfn);

	devfn |= SYS_CFG_START | SYS_CFG_WRITE;

	spin_lock(&v2m_cfg_lock);
	/* Acknowledge any previous completion before starting. */
	val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
	writel(val & ~SYS_CFG_COMPLETE, MMIO_P2V(V2M_SYS_CFGSTAT));

	writel(data, MMIO_P2V(V2M_SYS_CFGDATA));
	writel(devfn, MMIO_P2V(V2M_SYS_CFGCTRL));

	/* Poll for completion; cpu_relax() matches v2m_cfg_read(). */
	do {
		cpu_relax();
		val = readl(MMIO_P2V(V2M_SYS_CFGSTAT));
	} while (val == 0);
	spin_unlock(&v2m_cfg_lock);

	return !!(val & SYS_CFG_ERR);
}
Beispiel #9
0
/*
 * Populate the cpu_possible map from the SCU's core count, clamped to
 * what this kernel supports, and install the GIC as the cross-call
 * (IPI) backend.
 */
static void ct_ca9x4_init_cpu_map(void)
{
	int cpu;
	int ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));

	/* The SCU may report more cores than the kernel is built for. */
	if (ncores > nr_cpu_ids) {
		pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
			ncores, nr_cpu_ids);
		ncores = nr_cpu_ids;
	}

	for (cpu = 0; cpu < ncores; ++cpu)
		set_cpu_possible(cpu, true);

	set_smp_cross_call(gic_raise_softirq);
}
/*
 * Select the core tile descriptor matching the tile ID latched in the
 * V2M_SYS_PROCID0 register.  The machine cannot continue without a
 * matching descriptor, so an unknown ID is fatal.
 */
static void __init v2m_populate_ct_desc(void)
{
	int i;
	u32 current_tile_id;

	ct_desc = NULL;
	current_tile_id = readl(MMIO_P2V(V2M_SYS_PROCID0)) & V2M_CT_ID_MASK;

	for (i = 0; i < ARRAY_SIZE(ct_descs) && !ct_desc; ++i)
		if (ct_descs[i]->id == current_tile_id)
			ct_desc = ct_descs[i];

	if (!ct_desc)
		/* %08x (zero-padded): "%8x" space-padded after the 0x prefix. */
		panic("vexpress: failed to populate core tile description "
		      "for tile ID 0x%08x\n", current_tile_id);
}
Beispiel #11
0
/*
 * Late init for the CA9x4 core tile: configure and enable the L2 cache
 * controller (when built in), register the tile's AMBA devices, and
 * register the PMU platform device.
 */
static void __init ct_ca9x4_init(void)
{
	int i;

#ifdef CONFIG_CACHE_L2X0
	void __iomem *l2x0_base = MMIO_P2V(CT_CA9X4_L2CC);

	/* set RAM latencies to 1 cycle for this core tile. */
	writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
	writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);

	/* NOTE(review): aux value 0x00400000 sets bit 22 of the auxiliary
	 * control register and mask 0xfe0fffff preserves most other bits —
	 * confirm against the L2C-310 aux control register layout. */
	l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
#endif

	for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
		amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);

	platform_device_register(&pmu_device);
}
Beispiel #12
0
/*
 * Motherboard timer setup: register the 24MHz counter for sched_clock,
 * switch both SP804 timers onto the 1MHz TIMCLK reference, disable
 * them, then register TIMER1 as clocksource and TIMER0 as the
 * clockevent device.
 */
static void __init v2m_timer_init(void)
{
	void __iomem *sysctl = MMIO_P2V(V2M_SYSCTL + SCCTRL);
	u32 scctrl;

	versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);

	/* Select 1MHz TIMCLK as the reference clock for SP804 timers */
	scctrl = readl(sysctl);
	scctrl |= SCCTRL_TIMEREN0SEL_TIMCLK | SCCTRL_TIMEREN1SEL_TIMCLK;
	writel(scctrl, sysctl);

	/* Stop both timers before the timekeeping code takes them over. */
	writel(0, MMIO_P2V(V2M_TIMER0) + TIMER_CTRL);
	writel(0, MMIO_P2V(V2M_TIMER1) + TIMER_CTRL);

	sp804_clocksource_init(MMIO_P2V(V2M_TIMER1));
	sp804_clockevents_init(MMIO_P2V(V2M_TIMER0), IRQ_V2M_TIMER0);
}
Beispiel #13
0
static unsigned int v2m_mmci_status(struct device *dev)
{
	return readl(MMIO_P2V(V2M_SYS_MCI)) & (1 << 0);
}
Beispiel #14
0
/* NOR flash VPP hook: any nonzero 'on' asserts the programming voltage. */
static void v2m_flash_set_vpp(int on)
{
	writel(!!on, MMIO_P2V(V2M_SYS_FLASH));
}
Beispiel #15
0
/* Flash teardown: deassert VPP so the device can no longer be written. */
static void v2m_flash_exit(void)
{
	writel(0, MMIO_P2V(V2M_SYS_FLASH));
}
Beispiel #16
0
/* Flash init hook: start with VPP deasserted.  Always succeeds. */
static int v2m_flash_init(void)
{
	writel(0, MMIO_P2V(V2M_SYS_FLASH));
	return 0;
}
Beispiel #17
0
/*
 * Switch on the Snoop Control Unit before secondaries are brought up.
 * max_cpus is unused here; the present map is handled by core code.
 */
static void ct_ca9x4_smp_enable(unsigned int max_cpus)
{
	scu_enable(MMIO_P2V(A9_MPCORE_SCU));
}
/*
 * GIC setup for the CA9x4 tile: distributor first (PPIs start at 29),
 * then the CPU interface.  The CPU interface base is exported through
 * gic_cpu_base_addr for secondary-core bringup.
 */
static void __init ct_ca9x4_init_irq(void)
{
	void __iomem *dist_base = MMIO_P2V(A9_MPCORE_GIC_DIST);

	gic_cpu_base_addr = MMIO_P2V(A9_MPCORE_GIC_CPU);
	gic_dist_init(0, dist_base, 29);
	gic_cpu_init(0, gic_cpu_base_addr);
}
/* platform_device variant of the VPP hook; 'on' is treated as boolean. */
static void v2m_flash_set_vpp(struct platform_device *pdev, int on)
{
	writel(!!on, MMIO_P2V(V2M_SYS_FLASH));
}
/*
 * Register the P7 PL330 DMA controller and carve out on-chip RAM for
 * its microcode.
 *
 * Chip-revision quirks (NIC remap / ACP bypass, IRQ wiring, flushp)
 * are applied up front.  Registration or microcode-region failures are
 * fatal: later init code depends on the DMA device being usable.
 * Always returns 0 (panics instead of failing).
 */
int __init p7_init_dma(void)
{
	int         err;
	int const   rev = p7_chiprev();

	pr_debug("p7: registering %s...\n", p7_dma_dev.dev.init_name);

	if (rev ==  P7_CHIPREV_R1) {
		/* P7 first revision initializes DMA controller in non secure mode
		 * when coming out of reset but it is not possible to switch back to
		 * secure mode. Since Linux / cores / L2 cache controller run in
		 * secure mode, and all DMA controller transactions going through ACP
		 * port are flagged as non secure, CPU and DMA accesses to the same
		 * address won't point to same L2 cache internal location.
		 * Therefore, we must disable DMA RAM to RAM ACP accesses (and
		 * bypass L2 cache) to perform transactions directly onto main AXI
		 * system bus (the one behind L2 cache).
		 */
#define P7_NIC_REMAP    P7_NIC
#define NIC_NOACP       (1U << 7)
		__raw_writel(NIC_NOACP, MMIO_P2V(P7_NIC_REMAP));

		/* On R1, DMA interrupts are not shared */
		p7_dma_dev.irq[0] = P7_R1_DMA_ABORT_IRQ;
		p7_dma_dev.irq[1] = P7_R1_DMA5_IRQ;
		p7_dma_pdata.flushp = true;
	}
	else if (rev == P7_CHIPREV_R2 ||
	         rev == P7_CHIPREV_R3) {
		/*
		 * P7_NIC_REMAP is write-only, we can't check the REMAP_DRAM bit
		 * value. The assumption is made that it is already set at this point,
		 * so we add it to our bitmask.
		 */
#define NIC_REMAP_DRAM  (1U)
		__raw_writel(NIC_REMAP_DRAM | NIC_NOACP, MMIO_P2V(P7_NIC_REMAP));

		p7_dma_pdata.flushp = true;
	}

	/* Advertise the transfer types the pl330 driver may use. */
	dma_cap_set(DMA_MEMCPY, p7_dma_pdata.cap_mask);
	dma_cap_set(DMA_SLAVE, p7_dma_pdata.cap_mask);
	dma_cap_set(DMA_CYCLIC, p7_dma_pdata.cap_mask);

	err = amba_device_register(&p7_dma_dev, &iomem_resource);
	if (err)
		panic("p7: failed to register %s (%d)\n",
		      p7_dma_dev.dev.init_name,
		      err);

	/*
	 * We want to store controller microcode into internal RAM for performance
	 * reasons.
	 * As amba_device holds a single resource and pl330 driver does not handle
	 * multiple memory resources, we have to reserve microcode memory region here.
	 * Related device must have been initialized (amba_device_register) before
	 * using dma_declare_coherent_memory.
	 * Moreover, dma_declare_coherent_memory must be performed before pl330
	 * driver loaded since it allocates microcode region at probing time.
	 */
	if (! (dma_declare_coherent_memory(&p7_dma_dev.dev,
	                                   p7_dma_ucode_addr(),
	                                   p7_dma_ucode_addr(),
	                                   p7_dma_ucode_sz(),
	                                   DMA_MEMORY_MAP |
	                                   DMA_MEMORY_EXCLUSIVE) &
	       DMA_MEMORY_MAP))
		/* Failure: will use DMA zone located in system RAM. */
		panic("p7: failed to map DMA controller microcode memory region [%08x:%08x]\n",
		      p7_dma_ucode_addr(),
		      p7_dma_ucode_addr() + p7_dma_ucode_sz() - 1);

	dev_info(&p7_dma_dev.dev,
			 "mapped microcode memory region [%08x:%08x]\n",
			 p7_dma_ucode_addr(),
			 p7_dma_ucode_addr() + p7_dma_ucode_sz() - 1);
	return 0;
}
Beispiel #21
0
/* Early init: per-tile hook first, then the 24MHz sched_clock source. */
static void __init v2m_init_early(void)
{
	ct_desc->init_early();
	versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
}
Beispiel #22
0
/* Initialise the A9 MPCore GIC; IRQ 29 is the first PPI on this tile. */
static void __init ct_ca9x4_init_irq(void)
{
	void __iomem *dist_base = MMIO_P2V(A9_MPCORE_GIC_DIST);
	void __iomem *cpu_base = MMIO_P2V(A9_MPCORE_GIC_CPU);

	gic_init(0, 29, dist_base, cpu_base);
}
/*
 * Record the TWD (local timer) base and install the tile's static I/O
 * mappings via the motherboard helper.
 */
static void __init ct_ca9x4_map_io(void)
{
	twd_base = MMIO_P2V(A9_MPCORE_TWD);
	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
}
Beispiel #24
0
/* Virtual base address of the A9 MPCore Snoop Control Unit. */
static void __iomem *scu_base_addr(void)
{
	return MMIO_P2V(A9_MPCORE_SCU);
}
/*
 * Early init: run the tile-specific hook, register the motherboard
 * clock lookups, then install the 24MHz counter as sched_clock source.
 */
static void __init v2m_init_early(void)
{
	ct_desc->init_early();
	clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
	versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
}