Example 1
secondary_entry_fn secondary_cinit(void)
{
	struct thread_info *ti = current_thread_info();
	secondary_entry_fn entry;

	thread_info_init(ti, 0);

	if (!(auxinfo.flags & AUXINFO_MMU_OFF)) {
		ti->pgtable = mmu_idmap;
		mmu_mark_enabled(ti->cpu);
	}

	/*
	 * Save secondary_data.entry locally to avoid opening a race
	 * window between marking ourselves online and calling it.
	 */
	entry = secondary_data.entry;
	set_cpu_online(ti->cpu, true);
	sev();

	/*
	 * Return to the assembly stub, allowing entry to be called
	 * from there with an empty stack.
	 */
	return entry;
}
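
For context, the sev() above pairs with a wfe()-based wait on the CPU that booted this secondary. Below is a minimal sketch of that waiting side; cpu_online() and wfe() are assumed helpers standing in for whatever the surrounding code base provides, with cpu_online() reading the same mask that set_cpu_online() updates.

#include <stdbool.h>

extern bool cpu_online(int cpu);	/* assumed: reads the online mask */

static inline void wfe(void)
{
	/* Sleep until an event arrives; the secondary's sev() wakes us. */
	asm volatile("wfe" ::: "memory");
}

static void wait_for_secondary_online(int cpu)
{
	/* SEV is a broadcast, so re-check the condition after each wakeup. */
	while (!cpu_online(cpu))
		wfe();
}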
Example 2
File: smpboot.c Project: abligh/xen
void __init
make_cpus_ready(unsigned int max_cpus, unsigned long boot_phys_offset)
{
    unsigned long *gate;
    paddr_t gate_pa;
    int i;

    printk("Waiting for %i other CPUs to be ready\n", max_cpus - 1);
    /* We use the unrelocated copy of smp_up_cpu as that's the one the
     * others can see. */ 
    gate_pa = ((paddr_t) (unsigned long) &smp_up_cpu) + boot_phys_offset;
    gate = map_domain_page(gate_pa >> PAGE_SHIFT) + (gate_pa & ~PAGE_MASK); 
    for ( i = 1; i < max_cpus; i++ )
    {
        /* Tell the next CPU to get ready */
        /* TODO: handle boards where CPUIDs are not contiguous */
        *gate = i;
        flush_xen_dcache(*gate);
        isb();
        sev();
        /* And wait for it to respond */
        while ( ready_cpus < i )
            smp_rmb();
    }
    unmap_domain_page(gate);
}
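
The gate written above is consumed by each secondary's early boot loop (assembly in this Xen tree's head.S), which sleeps in WFE until smp_up_cpu matches its CPU ID and then advances ready_cpus. A C rendering of that loop, for illustration only, using the variable names from the snippet above:

extern volatile unsigned long smp_up_cpu;	/* the word behind 'gate' */
extern volatile unsigned long ready_cpus;

static void secondary_wait_at_gate(unsigned long my_cpuid)
{
	/* Sleep until make_cpus_ready() writes our ID and issues SEV. */
	while (smp_up_cpu != my_cpuid)
		asm volatile("wfe" ::: "memory");

	/* Let the boot CPU's 'ready_cpus < i' poll advance. */
	ready_cpus++;
}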
Example 3
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	void **release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	release_addr = __va(cpu_release_addr[cpu]);

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert it to the boot-loader's
	 * endianness before jumping. This is mandated by the boot
	 * protocol.
	 */
	release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));

	__flush_dcache_area(release_addr, sizeof(release_addr[0]));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	return 0;
}
Example 4
/*
 * Boot a secondary CPU, and assign it the specified idle task.
 * This also gives us the initial stack to use for this CPU.
 */
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	raw_spin_lock(&boot_lock);

	/*
	 * Update the pen release flag.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	timeout = jiffies + (1 * HZ);
	while (time_before(jiffies, timeout)) {
		if (secondary_holding_pen_release == INVALID_HWID)
			break;
		udelay(10);
	}

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	raw_spin_unlock(&boot_lock);

	return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
}
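
The pen itself is an assembly stub; sketched in C under the same names, and compressing the acknowledgement the secondary performs once it is running, the waiting side looks roughly like this (read_cpuid_mpidr() and MPIDR_HWID_BITMASK are assumed helpers):

extern volatile u64 secondary_holding_pen_release;

static void secondary_holding_pen_sketch(void)
{
	u64 hwid = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;

	/* Sleep until boot_secondary() publishes our HWID and issues SEV. */
	while (secondary_holding_pen_release != hwid)
		asm volatile("wfe" ::: "memory");

	/* Reset the flag; boot_secondary() polls for INVALID_HWID above. */
	secondary_holding_pen_release = INVALID_HWID;
}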
Example 5
/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTRL.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}
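
The CPU_DOWN state published here is polled by whichever CPU coordinates the cluster teardown. A sketch of such a waiter, assuming a sync_cache_r() read-side counterpart to the sync_cache_w() used above:

static void wait_for_cpu_down_sketch(unsigned int cpu, unsigned int cluster)
{
	/* Re-read from memory on every pass: the writer runs with caches off. */
	sync_cache_r(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	while (mcpm_sync.clusters[cluster].cpus[cpu].cpu != CPU_DOWN) {
		asm volatile("wfe" ::: "memory");
		sync_cache_r(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	}
}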
Example 6
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	printk(KERN_DEBUG "Starting secondary CPU %d\n", cpu);

	
	pen_release = cpu;
	dmac_clean_range((void *)&pen_release,
			 (void *)((char *)&pen_release + sizeof(pen_release)));
	dmac_clean_range((void *)&secondary_data,
			 (void *)((char *)&secondary_data + sizeof(secondary_data)));
	sev();
	dsb();

	
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)((char *)&pen_release + sizeof(pen_release)));
		msleep_interruptible(1);
		if (cnt++ >= SECONDARY_CPU_WAIT_MS)
			break;
	}

	if (pen_release == 0xFFFFFFFF)
		printk(KERN_DEBUG "Secondary CPU start acked %d\n", cpu);
	else
		printk(KERN_ERR "Secondary CPU failed to start... continuing\n");

	return 0;
}
Example 7
/* Obtain a ticket for a given CPU */
static unsigned int bakery_get_ticket(bakery_lock_t *bakery, unsigned int me)
{
	unsigned int my_ticket, their_ticket;
	unsigned int they;

	/*
	 * Flag that we're busy getting our ticket. All CPUs are iterated in the
	 * order of their ordinal position to decide the maximum ticket value
	 * observed so far. Our priority is set to be greater than the maximum
	 * observed priority
	 *
	 * Note that it's possible that more than one contender gets the same
	 * ticket value. That's OK as the lock is acquired based on the priority
	 * value, not the ticket value alone.
	 */
	my_ticket = 0;
	bakery->entering[me] = 1;
	for (they = 0; they < BAKERY_LOCK_MAX_CPUS; they++) {
		their_ticket = bakery->number[they];
		if (their_ticket > my_ticket)
			my_ticket = their_ticket;
	}

	/*
	 * Compute ticket; then signal to other contenders waiting for us to
	 * finish calculating our ticket value that we're done
	 */
	++my_ticket;
	bakery->number[me] = my_ticket;
	bakery->entering[me] = 0;
	sev();

	return my_ticket;
}
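
The returned ticket then feeds the classic Lamport acquisition loop. Because two contenders can draw the same ticket, ties are broken by CPU index, which is what the comment above alludes to. A sketch of that consumer, reusing the structure of bakery_get_ticket():

static void bakery_lock_get_sketch(bakery_lock_t *bakery, unsigned int me)
{
	unsigned int my_ticket = bakery_get_ticket(bakery, me);
	unsigned int they;

	for (they = 0; they < BAKERY_LOCK_MAX_CPUS; they++) {
		if (they == me)
			continue;

		/* Wait for 'they' to finish choosing its ticket. */
		while (bakery->entering[they])
			asm volatile("wfe" ::: "memory");

		/* Priority is the pair (ticket, CPU index); lower goes first. */
		while (bakery->number[they] != 0 &&
		       (bakery->number[they] < my_ticket ||
			(bakery->number[they] == my_ticket && they < me)))
			asm volatile("wfe" ::: "memory");
	}
}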
Example 8
int main()
{
	int delay;
	unsigned int inchar;
	unsigned int newlr;

    //Disable cache on OCM
    Xil_SetTlbAttributes(0xFFFF0000,0x14de2);           // S=b1 TEX=b100 AP=b11, Domain=b1111, C=b0, B=b0
    SetupIntrSystem(&IntcInstancePtr);
    print("CPU0: writing startaddress for cpu1\n\r");
    Xil_Out32(CPU1STARTADR, 0x00200000);
    dmb(); // orders the write; a dsb() would be needed to guarantee completion before sev()

    print("CPU0: sending the SEV to wake up CPU1\n\r");
    sev();

    while(1){

    	inchar = getchar();
    	newlr = getchar();
    	//for( delay = 0; delay < OP_DELAY; delay++)//wait
    	//{}
    	xil_printf("value %d\n\r", inchar);
    	Xil_Out8(LED_PAT,inchar);

    }

    return 0;
}
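
On Zynq-7000 the boot ROM parks CPU1 in a WFE loop watching the magic location 0xFFFFFFF0, which CPU1STARTADR presumably names above; after the SEV it branches to whatever address CPU0 stored there (0x00200000 in this example). An illustrative sketch of that ROM-side protocol, not the actual ROM code:

#define CPU1_START_ADDR	((volatile unsigned int *)0xFFFFFFF0)

static void cpu1_rom_wait_sketch(void)
{
	unsigned int entry;

	/* Park until CPU0 stores a new entry point and issues SEV. */
	while ((entry = *CPU1_START_ADDR) == 0)
		asm volatile("wfe" ::: "memory");

	((void (*)(void))entry)();	/* jump to the application */
}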
Example 9
static int __init smp_spin_table_cpu_up(int cpu)
{
    paddr_t __iomem *release;

    if (!cpu_release_addr[cpu])
    {
        printk("CPU%d: No release addr\n", cpu);
        return -ENODEV;
    }

    release = ioremap_nocache(cpu_release_addr[cpu], 8);
    if ( !release )
    {
        dprintk(XENLOG_ERR, "CPU%d: Unable to map release address\n", cpu);
        return -EFAULT;
    }

    writeq(__pa(init_secondary), release);

    iounmap(release);

    sev();

    return cpu_up_send_sgi(cpu);
}
Example 10
/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}
Example 11
static void action_queue_complete(struct cpu_action_queue *q,
					struct cpu_action *action)
{
	/* Mark completion and send events to waiters. */
	store_release(&q->completed, action);
	sev();
}
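
The matching consumer pairs a load-acquire with WFE. A minimal sketch, assuming a load_acquire() counterpart to the store_release() used above:

static void wait_for_completion_sketch(struct cpu_action_queue *q,
					struct cpu_action *action)
{
	/* Pairs with store_release(); WFE naps between polls. */
	while (load_acquire(&q->completed) != action)
		asm volatile("wfe" ::: "memory");
}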
Example 12
void scent_map::draw( WINDOW *const win, const int div, const tripoint &center ) const
{
    assert( div != 0 );
    const int maxx = getmaxx( win );
    const int maxy = getmaxy( win );
    for( int x = 0; x < maxx; ++x ) {
        for( int y = 0; y < maxy; ++y ) {
            const int sn = operator()( x + center.x - maxx / 2, y + center.y - maxy / 2 ) / div;
            mvwprintz( win, y, x, sev( sn / 10 ), "%d", sn % 10 );
        }
    }
}
Example 13
void bakery_lock_release(unsigned int id, unsigned int offset)
{
	bakery_info_t *my_bakery_info;
	unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;

	my_bakery_info = get_my_bakery_info(offset, id);
	assert(bakery_ticket_number(my_bakery_info->lock_data));

	my_bakery_info->lock_data = 0;
	write_cache_op(my_bakery_info, is_cached);
	sev();
}
Example 14
void
tegra124_mp_start_ap(platform_t plat)
{
	bus_space_handle_t pmc;
	bus_space_handle_t exvec;
	int i;
	uint32_t val;
	uint32_t mask;

	if (bus_space_map(fdtbus_bs_tag, PMC_PHYSBASE, PMC_SIZE, 0, &pmc) != 0)
		panic("Couldn't map the PMC\n");
	if (bus_space_map(fdtbus_bs_tag, TEGRA_EXCEPTION_VECTORS_BASE,
	    TEGRA_EXCEPTION_VECTORS_SIZE, 0, &exvec) != 0)
		panic("Couldn't map the exception vectors\n");

	bus_space_write_4(fdtbus_bs_tag, exvec, TEGRA_EXCEPTION_VECTOR_ENTRY,
			  pmap_kextract((vm_offset_t)mpentry));
	bus_space_read_4(fdtbus_bs_tag, exvec, TEGRA_EXCEPTION_VECTOR_ENTRY);

	/* Wait until POWERGATE is ready (max 20 APB cycles). */
	do {
		val = bus_space_read_4(fdtbus_bs_tag, pmc,
		    PMC_PWRGATE_TOGGLE);
	} while ((val & PCM_PWRGATE_TOGGLE_START) != 0);

	for (i = 1; i < mp_ncpus; i++) {
		val = bus_space_read_4(fdtbus_bs_tag, pmc, PMC_PWRGATE_STATUS);
		mask = 1 << (i + 8);	/* cpu mask */
		if ((val & mask) == 0) {
			/* Wait until POWERGATE is ready (max 20 APB cycles). */
			do {
				val = bus_space_read_4(fdtbus_bs_tag, pmc,
				PMC_PWRGATE_TOGGLE);
			} while ((val & PCM_PWRGATE_TOGGLE_START) != 0);
			bus_space_write_4(fdtbus_bs_tag, pmc,
			    PMC_PWRGATE_TOGGLE,
			    PCM_PWRGATE_TOGGLE_START | (8 + i));

			/* Wait until CPU is powered */
			do {
				val = bus_space_read_4(fdtbus_bs_tag, pmc,
				    PMC_PWRGATE_STATUS);
			} while ((val & mask) == 0);
		}

	}
	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, pmc, PMC_SIZE);
	bus_space_unmap(fdtbus_bs_tag, exvec, TEGRA_EXCEPTION_VECTORS_SIZE);
}
Example 15
static int smp_spin_table_cpu_boot(unsigned int cpu)
{
	/*
	 * Update the pen release flag.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	return 0;
}
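
write_pen_release() is not shown in this snippet; in trees using this pen scheme it stores the HWID and cleans the line to the point of coherency so a secondary running with caches disabled can observe it. A sketch of what it typically does:

static void write_pen_release_sketch(u64 val)
{
	void *start = (void *)&secondary_holding_pen_release;

	secondary_holding_pen_release = val;
	/* Clean to PoC: the waiting CPU may have its caches off. */
	__flush_dcache_area(start, sizeof(secondary_holding_pen_release));
}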
Example 16
/* Release the lock and signal contenders */
void bakery_lock_release(bakery_lock_t *bakery)
{
	unsigned int me = plat_my_core_pos();

	assert_bakery_entry_valid(me, bakery);
	assert(bakery_ticket_number(bakery->lock_data[me]));

	/*
	 * Release lock by resetting ticket. Then signal other
	 * waiting contenders
	 */
	bakery->lock_data[me] = 0;
	dsb();
	sev();
}
Example 17
/* Release the lock and signal contenders */
void bakery_lock_release(unsigned long mpidr, bakery_lock_t *bakery)
{
	unsigned int me = platform_get_core_pos(mpidr);

	assert_bakery_entry_valid(me, bakery);
	assert(bakery->owner == me);

	/*
	 * Release lock by resetting ownership and ticket. Then signal other
	 * waiting contenders
	 */
	bakery->owner = NO_OWNER;
	bakery->number[me] = 0;
	sev();
}
Example 18
void
exynos5_mp_start_ap(platform_t plat)
{
	bus_addr_t sysram, pmu;
	int err, i, j;
	int status;
	int reg;

	err = bus_space_map(fdtbus_bs_tag, EXYNOS_PMU_BASE, 0x20000, 0, &pmu);
	if (err != 0)
		panic("Couldn't map pmu\n");

	if (exynos_get_soc_id() == EXYNOS5420_SOC_ID)
		reg = EXYNOS5420_SYSRAM_NS;
	else
		reg = EXYNOS_SYSRAM;

	err = bus_space_map(fdtbus_bs_tag, reg, 0x100, 0, &sysram);
	if (err != 0)
		panic("Couldn't map sysram\n");

	/* Give power to CPUs */
	for (i = 1; i < mp_ncpus; i++) {
		bus_space_write_4(fdtbus_bs_tag, pmu, CORE_CONFIG(i),
		    CORE_PWR_EN);

		for (j = 10; j >= 0; j--) {
			status = bus_space_read_4(fdtbus_bs_tag, pmu,
			    CORE_STATUS(i));
			if ((status & CORE_PWR_EN) == CORE_PWR_EN)
				break;
			DELAY(10);
			if (j == 0)
				printf("Can't power on CPU%d\n", i);
		}
	}

	bus_space_write_4(fdtbus_bs_tag, sysram, 0x0,
	    pmap_kextract((vm_offset_t)mpentry));

	dcache_wbinv_poc_all();

	dsb();
	sev();
	bus_space_unmap(fdtbus_bs_tag, sysram, 0x100);
	bus_space_unmap(fdtbus_bs_tag, pmu, 0x20000);
}
Example 19
void plat_cpu_reset_late(void)
{
	static uint32_t cntfrq;
	vaddr_t addr;

	if (!get_core_pos()) {
		/* read cnt freq */
		cntfrq = read_cntfrq();

#if defined(CFG_BOOT_SECONDARY_REQUEST)
		/* set secondary entry address */
		write32(__compiler_bswap32(CFG_TEE_LOAD_ADDR),
				DCFG_BASE + DCFG_SCRATCHRW1);

		/* release secondary cores */
		write32(__compiler_bswap32(0x1 << 1), /* cpu1 */
				DCFG_BASE + DCFG_CCSR_BRR);
		dsb();
		sev();
#endif

		/* configure CSU */

		/* first grant all peripherals */
		for (addr = CSU_BASE + CSU_CSL_START;
			 addr != CSU_BASE + CSU_CSL_END;
			 addr += 4)
			write32(__compiler_bswap32(CSU_ACCESS_ALL), addr);

		/* restrict key peripherals from NS */
		write32(__compiler_bswap32(CSU_ACCESS_SEC_ONLY),
			CSU_BASE + CSU_CSL30);
		write32(__compiler_bswap32(CSU_ACCESS_SEC_ONLY),
			CSU_BASE + CSU_CSL37);

		/* lock the settings */
		for (addr = CSU_BASE + CSU_CSL_START;
			 addr != CSU_BASE + CSU_CSL_END;
			 addr += 4)
			write32(read32(addr) |
				__compiler_bswap32(CSU_SETTING_LOCK),
				addr);
	} else {
		/* program the cntfrq, the cntfrq is banked for each core */
		write_cntfrq(cntfrq);
	}
}
Example 20
/* Executed by primary CPU, brings other CPUs out of reset. Called at boot
   as well as when a CPU is coming out of shutdown induced by echo 0 >
   /sys/devices/.../cpuX.
*/
int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	int cnt = 0;
	printk(KERN_DEBUG "Starting secondary CPU %d\n", cpu);

	/* Tell other CPUs to come out of reset.  Note that secondary CPUs
	 * are probably running with caches off, so we'll need to clean to
	 * memory. Normal cache ops will only clean to L2.
	 */
	pen_release = cpu;
	dmac_clean_range((void *)&pen_release,
			 (void *)((char *)&pen_release + sizeof(pen_release)));
	dmac_clean_range((void *)&secondary_data,
			 (void *)((char *)&secondary_data + sizeof(secondary_data)));
	sev();
	dsb();

	/* Use smp_cross_call() to send a soft interrupt to wake up
	 * the other core.
	 */
	smp_cross_call(cpumask_of(cpu));

	/* Wait for done signal. The cpu receiving the signal does not
	 * have the MMU or caching turned on, so all of its reads and
	 * writes are to/from memory.  Need to ensure that when
	 * reading the value we invalidate the cache line so we see the
	 * fresh data from memory as the normal routines may only
	 * invalidate to POU or L1.
	 */
	while (pen_release != 0xFFFFFFFF) {
		dmac_inv_range((void *)&pen_release,
			       (void *)((char *)&pen_release + sizeof(pen_release)));
		msleep_interruptible(1);
		if (cnt++ >= SECONDARY_CPU_WAIT_MS)
			break;
	}

	if (pen_release == 0xFFFFFFFF)
		printk(KERN_DEBUG "Secondary CPU start acked %d\n", cpu);
	else
		printk(KERN_ERR "Secondary CPU failed to start... continuing\n");

	return 0;
}
Example 21
static int __init smp_spin_table_prepare_cpu(int cpu)
{
	void **release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	release_addr = __va(cpu_release_addr[cpu]);
	release_addr[0] = (void *)__pa(secondary_holding_pen);
	__flush_dcache_area(release_addr, sizeof(release_addr[0]));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	return 0;
}
Example 22
static int smp_spin_table_cpu_boot(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (soc_ops && soc_ops->cpu_on)
		soc_ops->cpu_on(cpu);
#endif
	/*
	 * Update the pen release flag.
	 */
	write_pen_release(cpu_logical_map(cpu));

	/*
	 * Send an event, causing the secondaries to read pen_release.
	 */
	sev();

	return 0;
}
Example 23
/*******************************************************************************
 * Platform handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 ******************************************************************************/
static int rpi3_pwr_domain_on(u_register_t mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int pos = plat_core_pos_by_mpidr(mpidr);
	uint64_t *hold_base = (uint64_t *)PLAT_RPI3_TM_HOLD_BASE;

	assert(pos < PLATFORM_CORE_COUNT);

	hold_base[pos] = PLAT_RPI3_TM_HOLD_STATE_GO;

	/* Make sure that the write has completed */
	dsb();
	isb();

	sev();

	return rc;
}
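
The other half of this handshake is the platform's cold-boot hold loop: a parked secondary sleeps in WFE until its hold_base[] slot changes to the GO state written above. A sketch under the same names (in Trusted Firmware the real loop lives in the platform's assembly helpers):

static void rpi3_secondary_hold_sketch(unsigned int pos)
{
	volatile uint64_t *hold_base = (uint64_t *)PLAT_RPI3_TM_HOLD_BASE;

	/* Park until rpi3_pwr_domain_on() flips our slot and issues SEV. */
	while (hold_base[pos] != PLAT_RPI3_TM_HOLD_STATE_GO)
		asm volatile("wfe" ::: "memory");

	/* Continue to the warm entry point registered with the PSCI layer. */
}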
Example 24
void    
zynq7_mp_start_ap(platform_t plat)
{
	bus_space_handle_t scu_handle;
	bus_space_handle_t ocm_handle;
	uint32_t scu_ctrl;

	/* Map in SCU control register. */
	if (bus_space_map(fdtbus_bs_tag, SCU_CONTROL_REG, 4,
			  0, &scu_handle) != 0)
		panic("platform_mp_start_ap: Couldn't map SCU config reg\n");

	/* Set SCU enable bit. */
	scu_ctrl = bus_space_read_4(fdtbus_bs_tag, scu_handle, 0);
	scu_ctrl |= SCU_CONTROL_ENABLE;
	bus_space_write_4(fdtbus_bs_tag, scu_handle, 0, scu_ctrl);

	bus_space_unmap(fdtbus_bs_tag, scu_handle, 4);

	/* Map in magic location to give entry address to CPU1. */
	if (bus_space_map(fdtbus_bs_tag, ZYNQ7_CPU1_ENTRY, 4,
	    0, &ocm_handle) != 0)
		panic("platform_mp_start_ap: Couldn't map OCM\n");

	/* Write start address for CPU1. */
	bus_space_write_4(fdtbus_bs_tag, ocm_handle, 0,
	    pmap_kextract((vm_offset_t)mpentry));

	bus_space_unmap(fdtbus_bs_tag, ocm_handle, 4);

	/*
	 * The SCU is enabled above but I think the second CPU doesn't
	 * turn on filtering until after the wake-up below. I think that's why
	 * things don't work if I don't put these cache ops here.  Also, the
	 * magic location, 0xfffffff0, isn't in the SCU's filtering range so it
	 * needs a write-back too.
	 */
	dcache_wbinv_poc_all();

	/* Wake up CPU1. */
	dsb();
	sev();
}
Example 25
int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	unsigned long timeout;

	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* Initialize the boot status and give the secondary core
	 * the start address of the kernel, let the write buffer drain
	 */
	__raw_writel(0, OCM_HIGH_BASE + BOOT_STATUS_OFFSET);

	__raw_writel(virt_to_phys(secondary_startup),
					OCM_HIGH_BASE + BOOT_ADDR_OFFSET);
	wmb();

	/*
	 * Send an event to wake the secondary core from WFE state.
	 */
	sev();

	/*
	 * Wait for the other CPU to boot, but timeout if it doesn't
	 */
	timeout = jiffies + (1 * HZ);
	while ((__raw_readl(OCM_HIGH_BASE + BOOT_STATUS_OFFSET) !=
				BOOT_STATUS_CPU1_UP) &&
				(time_before(jiffies, timeout)))
		rmb();

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example 26
void plat_cpu_reset_late(void)
{
	vaddr_t addr;

	if (!get_core_pos()) {
#if defined(CFG_BOOT_SECONDARY_REQUEST)
		/* set secondary entry address */
		io_write32(DCFG_BASE + DCFG_SCRATCHRW1,
			   __compiler_bswap32(TEE_LOAD_ADDR));

		/* release secondary cores */
		io_write32(DCFG_BASE + DCFG_CCSR_BRR /* cpu1 */,
			   __compiler_bswap32(0x1 << 1));
		dsb();
		sev();
#endif

		/* configure CSU */

		/* first grant all peripherals */
		for (addr = CSU_BASE + CSU_CSL_START;
			 addr != CSU_BASE + CSU_CSL_END;
			 addr += 4)
			io_write32(addr, __compiler_bswap32(CSU_ACCESS_ALL));

		/* restrict key peripherals from NS */
		io_write32(CSU_BASE + CSU_CSL30,
			   __compiler_bswap32(CSU_ACCESS_SEC_ONLY));
		io_write32(CSU_BASE + CSU_CSL37,
			   __compiler_bswap32(CSU_ACCESS_SEC_ONLY));

		/* lock the settings */
		for (addr = CSU_BASE + CSU_CSL_START;
		     addr != CSU_BASE + CSU_CSL_END;
		     addr += 4)
			io_setbits32(addr,
				     __compiler_bswap32(CSU_SETTING_LOCK));
	}
}
Example 27
File: smpboot.c Project: abligh/xen
/* Bring up a remote CPU */
int __cpu_up(unsigned int cpu)
{
    /* Tell the remote CPU which stack to boot on. */
    init_stack = idle_vcpu[cpu]->arch.stack;

    /* Unblock the CPU.  It should be waiting in the loop in head.S
     * for an event to arrive when smp_up_cpu matches its cpuid. */
    smp_up_cpu = cpu;
    /* we need to make sure that the change to smp_up_cpu is visible to
     * secondary cpus with D-cache off */
    flush_xen_dcache(smp_up_cpu);
    isb();
    sev();

    while ( !cpu_online(cpu) )
    {
        cpu_relax();
        process_pending_softirqs();
    }

    return 0;
}
Example 28
static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
	__le64 __iomem *release_addr;

	if (!cpu_release_addr[cpu])
		return -ENODEV;

	/*
	 * The cpu-release-addr may or may not be inside the linear mapping.
	 * As ioremap_cache will either give us a new mapping or reuse the
	 * existing linear mapping, we can use it to cover both cases. In
	 * either case the memory will be MT_NORMAL.
	 */
	release_addr = ioremap_cache(cpu_release_addr[cpu],
				     sizeof(*release_addr));
	if (!release_addr)
		return -ENOMEM;

	/*
	 * We write the release address as LE regardless of the native
	 * endianness of the kernel. Therefore, any boot-loaders that
	 * read this address need to convert it to the boot-loader's
	 * endianness before jumping. This is mandated by the boot
	 * protocol.
	 */
	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
	__flush_dcache_area((__force void *)release_addr,
			    sizeof(*release_addr));

	/*
	 * Send an event to wake up the secondary CPU.
	 */
	sev();

	iounmap(release_addr);

	return 0;
}
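
On the consuming side, the spin-table boot protocol has the parked CPU poll the release word, convert it from LE if needed, and branch once it becomes non-zero. A sketch of that reader (the real pen is assembly):

static void spin_table_secondary_sketch(volatile __le64 *release_addr)
{
	u64 entry;

	/* Zero means 'keep waiting' in the spin-table protocol. */
	while ((entry = le64_to_cpu(*release_addr)) == 0)
		asm volatile("wfe" ::: "memory");

	((void (*)(void))entry)();	/* branch to secondary_holding_pen */
}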
Example 29
void    
platform_mp_start_ap(void)
{
	bus_space_handle_t scu;
	bus_space_handle_t src;

	uint32_t val;
	int i;

	if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0)
		panic("Couldn't map the SCU\n");
	if (bus_space_map(fdtbus_bs_tag, SRC_PHYSBASE, SRC_SIZE, 0, &src) != 0)
		panic("Couldn't map the system reset controller (SRC)\n");

	/*
	 * Invalidate SCU cache tags.  The 0x0000ffff constant invalidates all
	 * ways on all cores 0-3.  Per the ARM docs, it's harmless to write to
	 * the bits for cores that are not present.
	 */
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff);

	/*
	 * Erratum ARM/MP: 764369 (problems with cache maintenance).
	 * Setting the "disable-migratory bit" in the undocumented SCU
	 * Diagnostic Control Register helps work around the problem.
	 */
	val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_DIAG_CONTROL, 
	    val | SCU_DIAG_DISABLE_MIGBIT);

	/*
	 * Enable the SCU, then clean the cache on this core.  After these two
	 * operations the cache tag ram in the SCU is coherent with the contents
	 * of the cache on this core.  The other cores aren't running yet so
	 * their caches can't contain valid data yet, but we've initialized
	 * their SCU tag ram above, so they will be coherent from startup.
	 */
	val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG, 
	    val | SCU_CONTROL_ENABLE);
	dcache_wbinv_poc_all();

	/*
	 * For each AP core, set the entry point address and argument registers,
	 * and set the core-enable and core-reset bits in the control register.
	 */
	val = bus_space_read_4(fdtbus_bs_tag, src, SRC_CONTROL_REG);
	for (i=1; i < mp_ncpus; i++) {
		bus_space_write_4(fdtbus_bs_tag, src, SRC_GPR0_C1FUNC + 8*i,
		    pmap_kextract((vm_offset_t)mpentry));
		bus_space_write_4(fdtbus_bs_tag, src, SRC_GPR1_C1ARG  + 8*i, 0);

		val |= ((1 << (SRC_CONTROL_C1ENA_SHIFT - 1 + i)) |
		    (1 << (SRC_CONTROL_C1RST_SHIFT - 1 + i)));
	}
	bus_space_write_4(fdtbus_bs_tag, src, SRC_CONTROL_REG, val);

	dsb();
	sev();

	bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE);
	bus_space_unmap(fdtbus_bs_tag, src, SRC_SIZE);
}
Example 30
void
platform_mp_start_ap(void)
{
	bus_space_handle_t scu;
	bus_space_handle_t imem;
	bus_space_handle_t pmu;
	uint32_t val;
	int i;

	if (bus_space_map(fdtbus_bs_tag, SCU_PHYSBASE, SCU_SIZE, 0, &scu) != 0)
		panic("Could not map the SCU");
	if (bus_space_map(fdtbus_bs_tag, IMEM_PHYSBASE,
	    IMEM_SIZE, 0, &imem) != 0)
		panic("Could not map the IMEM");
	if (bus_space_map(fdtbus_bs_tag, PMU_PHYSBASE, PMU_SIZE, 0, &pmu) != 0)
		panic("Could not map the PMU");

	/*
	 * Invalidate SCU cache tags.  The 0x0000ffff constant invalidates all
	 * ways on all cores 0-3. Per the ARM docs, it's harmless to write to
	 * the bits for cores that are not present.
	 */
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_INV_TAGS_REG, 0x0000ffff);

	/* Make sure all cores except the first are off */
	val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON);
	for (i = 1; i < mp_ncpus; i++)
		val |= 1 << i;
	bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val);

	/* Enable SCU power domain */
	val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON);
	val &= ~PMU_PWRDN_SCU;
	bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val);

	/* Enable SCU */
	val = bus_space_read_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG);
	bus_space_write_4(fdtbus_bs_tag, scu, SCU_CONTROL_REG,
	    val | SCU_CONTROL_ENABLE);

	/*
	 * Cores will execute the code which resides at the start of
	 * the on-chip bootram/sram after power-on. This sram region
	 * should be reserved and the trampoline code that directs
	 * the core to the real startup code in ram should be copied
	 * into this sram region.
	 *
	 * First set boot function for the sram code.
	 */
	mpentry_addr = (char *)pmap_kextract((vm_offset_t)mpentry);

	/* Copy trampoline to sram, that runs during startup of the core */
	bus_space_write_region_4(fdtbus_bs_tag, imem, 0,
	    (uint32_t *)&rk30xx_boot2, 8);

	dcache_wbinv_poc_all();

	/* Start all cores */
	val = bus_space_read_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON);
	for (i = 1; i < mp_ncpus; i++)
		val &= ~(1 << i);
	bus_space_write_4(fdtbus_bs_tag, pmu, PMU_PWRDN_CON, val);

	dsb();
	sev();

	bus_space_unmap(fdtbus_bs_tag, scu, SCU_SIZE);
	bus_space_unmap(fdtbus_bs_tag, imem, IMEM_SIZE);
	bus_space_unmap(fdtbus_bs_tag, pmu, PMU_SIZE);
}