Example #1
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}
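For context, a minimal sketch of the data this initializer touches. Each state byte is padded out to one cache writeback granule, so cleaning one field never evicts or resurrects a neighbour's state; this is what the BUG_ON alignment check above relies on. The layout follows mainline arch/arm/include/asm/mcpm.h and asm/cacheflush.h, but treat it as an approximation:

/* One state byte per writeback granule, so per-field cache maintenance
 * stays independent (assumed layout, per arch/arm/include/asm/mcpm.h): */
struct mcpm_sync_struct {
	/* individual CPU states */
	struct {
		s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
	} cpus[MAX_CPUS_PER_CLUSTER];

	/* cluster state */
	s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

	/* inbound-side state */
	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
};

struct sync_struct {
	struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
};

/* sync_cache_w()/sync_cache_r() clean or invalidate exactly the object
 * passed in (per arch/arm/include/asm/cacheflush.h): */
#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))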
Example #2
/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}
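Examples #2, #3 and #4 all share this function's three-step publish sequence. A hypothetical helper condensing the pattern (publish_state is a made-up name, not a kernel symbol):

static void publish_state(int *flag, int val)
{
	dmb();			/* order all prior writes before the flag update */
	*flag = val;
	sync_cache_w(flag);	/* clean the line: visible even to non-coherent CPUs */
	dsb_sev();		/* ensure completion, then SEV wakes CPUs parked in wfe() */
}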
Example #3
static void per_cpu_sw_state_wr(u32 cpu, int val)
{
	per_cpu(per_cpu_sw_state, cpu) = val;
	dmb();
	sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	dsb_sev();
}
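The matching reader in the same driver (arch/arm/mach-hisi/platsmp.c) does the mirror-image operation, invalidating before the read so a stale cached copy is never returned; roughly:

static int per_cpu_sw_state_rd(u32 cpu)
{
	/* invalidate our copy first, then read the fresh value from RAM */
	sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu)));
	return per_cpu(per_cpu_sw_state, cpu);
}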
Example #4
/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTRL.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}
Example #5
static void __init sti_smp_prepare_cpus(unsigned int max_cpus)
{
	struct device_node *np;
	void __iomem *scu_base;
	u32 __iomem *cpu_strt_ptr;
	u32 release_phys;
	int cpu;
	unsigned long entry_pa = virt_to_phys(sti_secondary_startup);

	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");

	if (np) {
		scu_base = of_iomap(np, 0);
		scu_enable(scu_base);
		of_node_put(np);
	}

	if (max_cpus <= 1)
		return;

	for_each_possible_cpu(cpu) {

		np = of_get_cpu_node(cpu, NULL);

		if (!np)
			continue;

		if (of_property_read_u32(np, "cpu-release-addr",
					 &release_phys)) {
			pr_err("CPU %d: missing or invalid cpu-release-addr property\n",
			       cpu);
			continue;
		}

		/*
		 * The holding pen is usually configured in SBC DMEM but
		 * can also be in RAM.
		 */
		if (!memblock_is_memory(release_phys))
			cpu_strt_ptr =
				ioremap(release_phys, sizeof(release_phys));
		else
			cpu_strt_ptr =
				(u32 __iomem *)phys_to_virt(release_phys);

		__raw_writel(entry_pa, cpu_strt_ptr);

		/*
		 * wmb so that the data is actually written before the
		 * cache flush is done
		 */
		smp_wmb();
		sync_cache_w(cpu_strt_ptr);

		if (!memblock_is_memory(release_phys))
			iounmap(cpu_strt_ptr);
	}
}
Example #6
int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid)
{
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		pr_err(SPCLOG "unable to allocate mem\n");
		return -ENOMEM;
	}

	info->baseaddr = baseaddr;
	info->a15_clusid = a15_clusid;

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure driver info reaches main
	 * memory.
	 */
	sync_cache_w(info);	/* flush the driver data itself... */
	sync_cache_w(&info);	/* ...and the pointer through which it is found */

	return 0;
}
Example #7
/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
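Taken together with Examples #2, #4 and #13, this yields the MCPM last-man power-down sequence. A condensed sketch of how arch/arm/common/mcpm_entry.c orders the calls (locking is elided, and the two cache-teardown helpers are simplified placeholders for the platform ops):

static void power_down_sketch(unsigned int cpu, unsigned int cluster,
			      bool last_man)
{
	__mcpm_cpu_going_down(cpu, cluster);	/* Example #13: commit, cache still on */

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		/* Safe to tear the whole cluster down. */
		cluster_cache_disable_and_teardown();	/* placeholder for platform ops */
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);	/* Example #2 */
	} else {
		cpu_cache_disable();			/* placeholder for platform ops */
	}

	__mcpm_cpu_down(cpu, cluster);	/* Example #4: CPU cache must be off by now */
	wfi();				/* power may be cut from here on */
}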
Example #8
static int cpu_suspend_alloc_sp(void)
{
	void *ctx_ptr;
	/* ctx_ptr is an array of physical addresses */
	ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(u32), GFP_KERNEL);

	if (WARN_ON(!ctx_ptr))
		return -ENOMEM;
	sleep_save_sp.save_ptr_stash = ctx_ptr;
	sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
	sync_cache_w(&sleep_save_sp);
	return 0;
}
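The object being flushed here bridges the MMU-on and MMU-off worlds, which is why both addresses are stashed. Roughly how the structure is declared in the ARM suspend/resume headers (a sketch; the exact header location is an assumption):

struct sleep_save_sp {
	u32 *save_ptr_stash;		/* virtual address, used while the MMU is on */
	u32 save_ptr_stash_phys;	/* physical address, used by early resume
					 * code running with the MMU off */
};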
Example #9
int __init coherency_init(void)
{
	struct device_node *np;

	/*
	 * The coherency fabric is needed:
	 * - For coherency between processors on Armada XP, so only
	 *   when SMP is enabled.
	 * - For coherency between the processor and I/O devices, but
	 *   this coherency requires many pre-requisites (write
	 *   allocate cache policy, shareable pages, SMP bit set) that
 *   are only met in SMP situations.
	 *
	 * Note that this means that on Armada 370, there is currently
	 * no way to use hardware I/O coherency, because even when
	 * CONFIG_SMP is enabled, is_smp() returns false due to the
	 * Armada 370 being a single-core processor. To lift this
	 * limitation, we would have to find a way to make the cache
	 * policy set to write-allocate (on all Armada SoCs), and to
	 * set the shareable attribute in page tables (on all Armada
	 * SoCs except the Armada 370). Unfortunately, such decisions
	 * are taken very early in the kernel boot process, at a point
	 * where we don't know yet on which SoC we are running.
	 */
	if (!is_smp())
		return 0;

	np = of_find_matching_node(NULL, of_coherency_table);
	if (np) {
		struct resource res;
		pr_info("Initializing Coherency fabric\n");
		of_address_to_resource(np, 0, &res);
		coherency_phys_base = res.start;
		/*
		 * Ensure secondary CPUs will see the updated value,
		 * which they read before they join the coherency
		 * fabric, and therefore before they are coherent with
		 * the boot CPU cache.
		 */
		sync_cache_w(&coherency_phys_base);
		coherency_base = of_iomap(np, 0);
		coherency_cpu_base = of_iomap(np, 1);
		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
		of_node_put(np);
	}

	return 0;
}
Example #10
void __init socfpga_sysmgr_init(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr");

	if (of_property_read_u32(np, "cpu1-start-addr",
			(u32 *) &socfpga_cpu1start_addr))
		pr_err("SMP: Need cpu1-start-addr in device tree.\n");

	/* Ensure that socfpga_cpu1start_addr is visible to other CPUs */
	smp_wmb();
	sync_cache_w(&socfpga_cpu1start_addr);

	sys_manager_base_addr = of_iomap(np, 0);

	np = of_find_compatible_node(NULL, NULL, "altr,rst-mgr");
	rst_manager_base_addr = of_iomap(np, 0);

	np = of_find_compatible_node(NULL, NULL, "altr,sdr-ctl");
	sdr_ctl_base_addr = of_iomap(np, 0);
}
Example #11
int __init coherency_init(void)
{
	struct device_node *np;

	np = of_find_matching_node(NULL, of_coherency_table);
	if (np) {
		struct resource res;
		pr_info("Initializing Coherency fabric\n");
		of_address_to_resource(np, 0, &res);
		coherency_phys_base = res.start;
		/*
		 * Ensure secondary CPUs will see the updated value,
		 * which they read before they join the coherency
		 * fabric, and therefore before they are coherent with
		 * the boot CPU cache.
		 */
		sync_cache_w(&coherency_phys_base);
		coherency_base = of_iomap(np, 0);
		coherency_cpu_base = of_iomap(np, 1);
		set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
	}

	return 0;
}
Example #12
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
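A typical caller is the MCPM SMP bring-up path. Condensed from arch/arm/common/mcpm_platsmp.c (a sketch; cpu_to_pcpu() is the file's helper mapping Linux's logical CPU number to a physical (cpu, cluster) pair):

static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned int pcpu, pcluster;
	int ret;
	extern void secondary_startup(void);

	cpu_to_pcpu(cpu, &pcpu, &pcluster);

	mcpm_set_entry_vector(pcpu, pcluster, NULL);	/* not ready to boot yet */
	ret = mcpm_cpu_power_up(pcpu, pcluster);
	if (ret)
		return ret;

	mcpm_set_entry_vector(pcpu, pcluster, secondary_startup);
	arch_send_wakeup_ipi_mask(cpumask_of(cpu));	/* kick the target CPU */
	dsb_sev();
	return 0;
}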
Example #13
/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTRL.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}
Example #14
/*
 * Write pen_release in a way that is guaranteed to be visible to all
 * observers, irrespective of whether they're taking part in coherency
 * or not.  This is necessary for the hotplug code to work reliably.
 */
static void write_pen_release(int val)
{
	pen_release = val;
	smp_wmb();
	sync_cache_w(&pen_release);
}
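The second half of this protocol runs on the freshly booted CPU: it uses the same primitive to write -1 back, which is what the polling loop in Example #16 waits for. A sketch of the classic pairing, as in arch/arm/plat-versatile/platsmp.c:

static void versatile_secondary_init(unsigned int cpu)
{
	/*
	 * Let the primary processor know we are out of the holding pen,
	 * with the same fully-visible write used by write_pen_release().
	 */
	write_pen_release(-1);

	/* synchronise with the boot CPU before going any further */
	spin_lock(&boot_lock);
	spin_unlock(&boot_lock);
}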
Example #15
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? __pa_symbol(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
Example #16
static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;
	struct device_node *np;

	np = of_find_matching_node(NULL, clk_ids);
	if (!np)
		return -ENODEV;

	clk_base = of_iomap(np, 0);
	if (!clk_base)
		return -ENOMEM;

	/*
	 * write the address of secondary startup into the clkc register
	 * at offset 0x2bc, then write the magic number 0x3CAF5D62 to the
	 * clkc register at offset 0x2b8, which is what boot rom code is
	 * waiting for. This would wake up the secondary core from WFE
	 */
#define SIRFSOC_CPU1_JUMPADDR_OFFSET 0x2bc
	__raw_writel(__pa_symbol(sirfsoc_secondary_startup),
		clk_base + SIRFSOC_CPU1_JUMPADDR_OFFSET);

#define SIRFSOC_CPU1_WAKEMAGIC_OFFSET 0x2b8
	__raw_writel(0x3CAF5D62,
		clk_base + SIRFSOC_CPU1_WAKEMAGIC_OFFSET);

	/* make sure write buffer is drained */
	mb();

	spin_lock(&boot_lock);

	/*
	 * The secondary processor is waiting to be released from
	 * the holding pen - release it, then wait for it to flag
	 * that it has been released by resetting prima2_pen_release.
	 *
	 * Note that "prima2_pen_release" is the hardware CPU ID, whereas
	 * "cpu" is Linux's internal ID.
	 */
	prima2_pen_release = cpu_logical_map(cpu);
	sync_cache_w(&prima2_pen_release);

	/*
	 * Send the secondary CPU SEV, thereby causing the boot monitor to read
	 * the JUMPADDR and WAKEMAGIC, and branch to the address found there.
	 */
	dsb_sev();

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		smp_rmb();
		if (prima2_pen_release == -1)
			break;

		udelay(10);
	}

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return prima2_pen_release != -1 ? -ENOSYS : 0;
}