Example #1
/*==========================================================================*
 * Name:         smp_prepare_cpus (old smp_boot_cpus)
 *
 * Description:  This routine boots up the APs.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int phys_id;
	unsigned long nr_cpu;

	nr_cpu = inl(M32R_FPGA_NUM_OF_CPUS_PORTL);
	if (nr_cpu > NR_CPUS) {
		printk(KERN_INFO "NUM_OF_CPUS reg. value [%ld] > NR_CPU [%d]",
			nr_cpu, NR_CPUS);
		goto smp_done;
	}
	for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++)
		physid_set(phys_id, phys_cpu_present_map);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
#endif

	show_mp_info(nr_cpu);

	init_ipi_lock();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated by commandline.\n");
		goto smp_done;
	}

	/*
	 * Now scan the CPU present map and fire up the other CPUs.
	 */
	Dprintk("CPU present map : %lx\n", physids_coerce(phys_cpu_present_map));

	for (phys_id = 0 ; phys_id < NR_CPUS ; phys_id++) {
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if (phys_id == bsp_phys_id)
			continue;

		if (!physid_isset(phys_id, phys_cpu_present_map))
			continue;

		if ((max_cpus >= 0) && (max_cpus <= cpucount + 1))
			continue;

		do_boot_cpu(phys_id);

		/*
		 * Make sure we unmap all failed CPUs
		 */
		if (physid_to_cpu(phys_id) == -1) {
			physid_clear(phys_id, phys_cpu_present_map);
			printk("phys CPU#%d not responding - " \
				"cannot use it.\n", phys_id);
		}
	}

smp_done:
	Dprintk("Boot done.\n");
}
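Underneath the physid_* helpers, the present-map handling above is plain
bitmask arithmetic over a fixed-size map. Below is a minimal userspace sketch
of the same pattern; the physid_set()/physid_isset() definitions are
simplified stand-ins for the kernel's, and NR_CPUS and nr_cpu are made-up
values:

#include <stdio.h>

#define NR_CPUS 8	/* assumed configured maximum */

static unsigned long phys_cpu_present_map;	/* one bit per physical CPU */

/* Simplified stand-ins for the kernel's physid_* helpers. */
static void physid_set(int id, unsigned long *map)
{
	*map |= 1UL << id;
}

static int physid_isset(int id, unsigned long map)
{
	return (map >> id) & 1;
}

int main(void)
{
	unsigned long nr_cpu = 4;	/* the FPGA register would supply this */
	int phys_id;

	/* Mark each probed CPU present, as the first loop above does. */
	for (phys_id = 0; phys_id < (int)nr_cpu; phys_id++)
		physid_set(phys_id, &phys_cpu_present_map);

	/* Boot only CPUs the map says exist, as the second loop does. */
	for (phys_id = 0; phys_id < NR_CPUS; phys_id++)
		if (physid_isset(phys_id, phys_cpu_present_map))
			printf("would boot phys CPU#%d\n", phys_id);
	return 0;
}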
Example #2
/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 *
	 * Since CPU0 is not woken up by INIT, it doesn't wait for the IPI.
	 */
	cpuid = smp_processor_id();
	if (apic->wait_for_init_deassert && cpuid)
		while (!atomic_read(&init_deasserted))
			cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	pr_debug("CALLIN, before setup_local_APIC()\n");
	if (apic->smp_callin_clear_local_apic)
		apic->smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}
/*
 * Report back to the Boot Processor during boot time or to the caller processor
 * during CPU online.
 */
static void __cpuinit smp_callin(void)
{
    int cpuid, phys_id;
    unsigned long timeout;

    /*
     * If woken up by an INIT in an 82489DX configuration
     * we may get here before an INIT-deassert IPI reaches
     * our local APIC.  We have to wait for the IPI or we'll
     * lock up on an APIC access.
     *
     * Since CPU0 is not woken up by INIT, it doesn't wait for the IPI.
     */
    cpuid = smp_processor_id();
    if (apic->wait_for_init_deassert && cpuid != 0)
        apic->wait_for_init_deassert(&init_deasserted);

    /*
     * (This works even if the APIC is not enabled.)
     */
    phys_id = read_apic_id();
    if (cpumask_test_cpu(cpuid, cpu_callin_mask)) {
        panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
              phys_id, cpuid);
    }
    pr_debug("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

    /*
     * STARTUP IPIs are fragile beasts as they might sometimes
     * trigger some glue motherboard logic. Complete APIC bus
     * silence for 1 second, this overestimates the time the
     * boot CPU is spending to send the up to 2 STARTUP IPIs
     * by a factor of two. This should be enough.
     */

    /*
     * Waiting 2s total for startup (udelay is not yet working)
     */
    timeout = jiffies + 2*HZ;
    while (time_before(jiffies, timeout)) {
        /*
         * Has the boot CPU finished its STARTUP sequence?
         */
        if (cpumask_test_cpu(cpuid, cpu_callout_mask))
            break;
        cpu_relax();
    }

    if (!time_before(jiffies, timeout)) {
        panic("%s: CPU%d started up but did not get a callout!\n",
              __func__, cpuid);
    }

    /*
     * the boot CPU has finished the init stage and is spinning
     * on callin_map until we finish. We are free to set up this
     * CPU, first the APIC. (this is probably redundant on most
     * boards)
     */

    pr_debug("CALLIN, before setup_local_APIC()\n");
    if (apic->smp_callin_clear_local_apic)
        apic->smp_callin_clear_local_apic();
    setup_local_APIC();
    end_local_APIC_setup();

    /*
     * Need to setup vector mappings before we enable interrupts.
     */
    setup_vector_irq(smp_processor_id());

    /*
     * Save our processor parameters. Note: this information
     * is needed for clock calibration.
     */
    smp_store_cpu_info(cpuid);

    /*
     * Get our bogomips.
     * Update loops_per_jiffy in cpu_data. Previous call to
     * smp_store_cpu_info() stored a value that is close but not as
     * accurate as the value just calculated.
     */
    calibrate_delay();
    cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
    pr_debug("Stack at about %p\n", &cpuid);

    /*
     * This must be done before setting cpu_online_mask
     * or calling notify_cpu_starting.
     */
    set_cpu_sibling_map(raw_smp_processor_id());
    wmb();

    notify_cpu_starting(cpuid);

    /*
     * Allow the master to continue.
     */
    cpumask_set_cpu(cpuid, cpu_callin_mask);
}
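The 2-second callout wait above relies on time_before(), which stays correct
even when the jiffies counter wraps: unsigned subtraction reinterpreted as
signed compares the two values by their distance rather than their magnitude.
A standalone sketch of the idiom, with a simulated tick counter started just
below the wrap point:

#include <stdio.h>

/* Wraparound-safe "a is before b", the way the kernel's time_before()
 * does it: (long)(a - b) < 0 holds as long as the two values are less
 * than half the counter range apart. */
static int time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long jiffies = (unsigned long)-5;	/* about to wrap */
	unsigned long timeout = jiffies + 10;		/* wraps past zero */
	int i;

	for (i = 0; i < 15; i++, jiffies++)
		printf("tick %2d: %s\n", i,
		       time_before(jiffies, timeout) ? "waiting" : "timed out");
	return 0;
}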
Example #4
void __init smp_prepare_cpus(unsigned int max_cpus)
{
    unsigned int ncores = get_core_count();
    unsigned int cpu = smp_processor_id();
    int i;

    /* sanity check */
    if (ncores == 0) {
        printk(KERN_ERR
               "MPCORE: strange CPU count of 0? Default to 1\n");

        ncores = 1;
    }

    if (ncores > NR_CPUS) {
        printk(KERN_WARNING
               "MPCORE: no. of cores (%d) greater than configured "
               "maximum of %d - clipping\n",
               ncores, NR_CPUS);
        ncores = NR_CPUS;
    }

    smp_store_cpu_info(cpu);

    /*
     * are we trying to boot more cores than exist?
     */
    if (max_cpus > ncores)
        max_cpus = ncores;

    /*
     * Initialise the present map, which describes the set of CPUs
     * actually populated at the present time.
     */
    for (i = 0; i < max_cpus; i++)
        set_cpu_present(i, true);

    /*
     * Initialise the SCU if there is more than one CPU and let
     * them know where to start. Note that, on modern versions of
     * MILO, the "poke" doesn't actually do anything until each
     * individual core is sent a soft interrupt to get it out of
     * WFI.
     */
    if (max_cpus > 1) {
        /* nobody is to be released from the pen yet */
        pen_release = -1;

        /*
         * Enable the local timer or broadcast device for the
         * boot CPU, but only if we have more than one CPU.
         */
        percpu_timer_setup();

        scu_enable(scu_base_addr());

        /* Wakeup other cores in an SoC-specific manner */
        plat_wake_secondary_cpu(max_cpus, platform_secondary_startup);

    }
}
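The pen_release word above is one half of a simple rendezvous: parked
secondaries spin (or sit in WFI) until the boot CPU writes their ID into the
shared word, then write -1 back to acknowledge. A userspace analogue using a
thread and a C11 atomic; the cache maintenance and the WFI/soft-interrupt
nudge mentioned in the comment are deliberately omitted:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int pen_release = -1;	/* -1: nobody released, as above */

static void *secondary(void *arg)
{
	int cpu = (int)(long)arg;

	/* Parked: spin until the boot CPU writes our ID into the pen. */
	while (atomic_load(&pen_release) != cpu)
		;
	printf("CPU%d: released from the pen\n", cpu);

	atomic_store(&pen_release, -1);	/* acknowledge: re-lock the pen */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, (void *)1L);

	atomic_store(&pen_release, 1);	/* boot CPU releases CPU1 */
	while (atomic_load(&pen_release) != -1)
		;			/* wait for the acknowledgement */

	pthread_join(t, NULL);
	return 0;
}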
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void __cpuinit secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	aee_rr_rec_hoplug(cpu, 1, 0);

	printk("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	aee_rr_rec_hoplug(cpu, 2, 0);

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	aee_rr_rec_hoplug(cpu, 3, 0);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	aee_rr_rec_hoplug(cpu, 4, 0);
	flush_tlb_all();
	aee_rr_rec_hoplug(cpu, 5, 0);

	preempt_disable();
	aee_rr_rec_hoplug(cpu, 6, 0);
	trace_hardirqs_off();
	aee_rr_rec_hoplug(cpu, 7, 0);

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();
	aee_rr_rec_hoplug(cpu, 8, 0);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	set_cpu_online(cpu, true);
	aee_rr_rec_hoplug(cpu, 9, 0);
	complete(&cpu_running);
	aee_rr_rec_hoplug(cpu, 10, 0);

	smp_store_cpu_info(cpu);
	aee_rr_rec_hoplug(cpu, 11, 0);

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);
	aee_rr_rec_hoplug(cpu, 12, 0);

	local_dbg_enable();
	aee_rr_rec_hoplug(cpu, 13, 0);
	local_irq_enable();
	aee_rr_rec_hoplug(cpu, 14, 0);
	local_fiq_enable();
	aee_rr_rec_hoplug(cpu, 15, 0);

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
	aee_rr_rec_hoplug(cpu, 16, 0);
}
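The ordering near the end matters: set_cpu_online() comes before
complete(&cpu_running), so the boot CPU blocked in wait_for_completion()
finds the CPU online when it resumes. A rough pthread sketch of that
handshake; the mutex, condition variable, and flag are hypothetical
stand-ins for the kernel's completion and online mask:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cpu_running = PTHREAD_COND_INITIALIZER;
static bool cpu_online;

static void *secondary(void *arg)
{
	(void)arg;
	/* ...per-CPU setup would happen before this point... */
	pthread_mutex_lock(&lock);
	cpu_online = true;			/* set_cpu_online(cpu, true) */
	pthread_cond_signal(&cpu_running);	/* complete(&cpu_running) */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);

	/* Boot CPU side: wait_for_completion(&cpu_running). */
	pthread_mutex_lock(&lock);
	while (!cpu_online)
		pthread_cond_wait(&cpu_running, &lock);
	pthread_mutex_unlock(&lock);
	printf("boot CPU: secondary reported online\n");

	pthread_join(t, NULL);
	return 0;
}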
Example #6
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	int i, cpu_nr;
	struct task_struct *p;

	printk("Entering SMP Mode...\n");
	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

#ifndef CONFIG_750_SMP
	/* Check for 750s, they just don't work with Linux SMP.
	 * If you actually have 750 SMP hardware and want to try to get
	 * it to work, send me a patch to make it work and
	 * I'll make CONFIG_750_SMP a config option.  -- Troy ([email protected])
	 */
	if (PVR_VER(mfspr(PVR)) == 8) {
		printk("SMP not supported on 750 cpus. %s line %d\n",
				__FILE__, __LINE__);
		return;
	}
#endif


	/* Probe arch for CPUs */
	cpu_nr = smp_ops->probe();

	/* Backup CPU 0 state */
	__save_cpu_setup();
	
	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;
	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* only regs.msr is actually used, and 0 is OK for it */
		memset(&regs, 0, sizeof(struct pt_regs));
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);
		init_idle(p, i);

		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i] = p;

		/*
		 * There was a cache flush loop here to flush the cache
		 * to memory for the first 8MB of RAM.  The cache flush
		 * has been pushed into the kick_cpu function for those
		 * platforms that need it.
		 */

		/* wake up cpus */
		smp_ops->kick_cpu(i);
		
		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 10000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			char buf[32];
			sprintf(buf, "found cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
			printk("Processor %d found.\n", i);
			smp_num_cpus++;
		} else {
			char buf[32];
			sprintf(buf, "didn't find cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
			printk("Processor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	smp_ops->setup_cpu(0);
	
	if (smp_num_cpus < 2)
		smp_tb_synchronized = 1;
}
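The callin wait in the loop above is a bounded poll: 10000 iterations of
udelay(100) gives roughly one second before the CPU is declared stuck. A
userspace sketch of the same pattern, with usleep() standing in for
udelay() and a C11 atomic standing in for cpu_callin_map:

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int cpu_callin_map[4];	/* stand-in for the kernel array */

/* Bounded poll, as above: up to 10000 * 100us, roughly one second. */
static int wait_for_callin(int cpu)
{
	int c;

	for (c = 10000; c > 0 && !atomic_load(&cpu_callin_map[cpu]); c--)
		usleep(100);		/* udelay(100) stand-in */
	return atomic_load(&cpu_callin_map[cpu]);
}

int main(void)
{
	atomic_store(&cpu_callin_map[1], 1);	/* pretend CPU1 called in */
	printf("Processor 1 %s\n", wait_for_callin(1) ? "found." : "is stuck.");
	printf("Processor 2 %s\n", wait_for_callin(2) ? "found." : "is stuck.");
	return 0;
}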
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();
	return 0;
}
Example #8
/* Activate a secondary processor. */
__cpuinit void start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	secondary_cpu_time_init();

#ifdef CONFIG_PPC64
	if (system_state == SYSTEM_RUNNING)
		vdso_data->processorCount++;

	vdso_getcpu_init();
#endif
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_sibling(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);

	local_irq_enable();

	cpu_startup_entry(CPUHP_ONLINE);

	BUG();
}
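When threads_per_core is a power of two, the base =
cpu_first_thread_sibling(cpu) step in the sibling loop reduces to clearing
the low thread-index bits of the CPU number. A standalone sketch under that
assumption, with THREADS_PER_CORE as a made-up constant:

#include <stdio.h>

#define THREADS_PER_CORE 4	/* assumed power of two */

/* First hardware thread in this CPU's core: clear the thread-index bits. */
static int cpu_first_thread_sibling(int cpu)
{
	return cpu & ~(THREADS_PER_CORE - 1);
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < 8; cpu++) {
		int base = cpu_first_thread_sibling(cpu);

		printf("cpu %d: siblings %d..%d\n",
		       cpu, base, base + THREADS_PER_CORE - 1);
	}
	return 0;
}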
Example #9
void __init smp_boot_cpus(void)
{
	extern struct task_struct *current_set[NR_CPUS];
	int i, cpu_nr;
	struct task_struct *p;

	printk("Entering SMP Mode...\n");
	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		prof_counter[i] = 1;
		prof_multiplier[i] = 1;
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	smp_ops = ppc_md.smp_ops;
	if (smp_ops == NULL) {
		printk("SMP not supported on this machine.\n");
		return;
	}

	/* Probe arch for CPUs */
	cpu_nr = smp_ops->probe();

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;
	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we
		   pass to do_fork.  That is the MSR_FP bit in 
		   regs.msr.  If that bit is on, then do_fork
		   (via copy_thread) will call giveup_fpu.
		   giveup_fpu will get a pointer to our (current's)
		   last register savearea via current->thread.regs 
		   and using that pointer will turn off the MSR_FP,
		   MSR_FE0 and MSR_FE1 bits.  At this point, this 
		   pointer is pointing to some arbitrary point within
		   our stack. */

		memset(&regs, 0, sizeof(struct pt_regs));
		
		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);
		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i] = p;

		/*
		 * There was a cache flush loop here to flush the cache
		 * to memory for the first 8MB of RAM.  The cache flush
		 * has been pushed into the kick_cpu function for those
		 * platforms that need it.
		 */

		/* wake up cpus */
		smp_ops->kick_cpu(i);
		
		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 1000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			char buf[32];
			sprintf(buf, "found cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x350+i);
			printk("Processor %d found.\n", i);
			smp_num_cpus++;
		} else {
			char buf[32];
			sprintf(buf, "didn't find cpu %d", i);
			if (ppc_md.progress) ppc_md.progress(buf, 0x360+i);
			printk("Processor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	smp_ops->setup_cpu(0);
	
	if (smp_num_cpus < 2)
		smp_tb_synchronized = 1;
}
Example #10
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	int apicid, cpu, bit, kicked;
	unsigned long bogosum = 0;

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();
	cpus_clear(cpu_sibling_map[0]);
	cpu_set(0, cpu_sibling_map[0]);

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		map_cpu_to_logical_apicid();
		return;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 * Makes no sense to do this check in clustered apic mode, so skip it
	 */
	if (!check_phys_apicid_present(boot_cpu_physical_apicid)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
				boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_physical_apicid);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		return;
	}

	connect_bsp_APIC();
	setup_local_APIC();
	map_cpu_to_logical_apicid();


	setup_portio_remap();

	/*
	 * Scan the CPU present map and fire up the other CPUs via do_boot_cpu
	 *
	 * In clustered apic mode, phys_cpu_present_map is constructed thus:
	 * bits 0-3 are quad0, 4-7 are quad1, etc. A perverse twist on the
	 * clustered apic ID.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if ((apicid == boot_cpu_apicid) || (apicid == BAD_APICID))
			continue;

		if (!check_apicid_present(bit))
			continue;
		if (max_cpus <= cpucount+1)
			continue;

		if (do_boot_cpu(apicid))
			printk("CPU #%d not responding - cannot use it.\n",
								apicid);
		else
			++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	smpboot_restore_warm_reset_vector();

	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);
	
	Dprintk("Before bogocount - setting activated=1.\n");

	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (cpucount)
			printk (KERN_INFO "WARNING: This combination of AMD processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}

	Dprintk("Boot done.\n");

	/*
	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
	 * efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else {
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings)
			printk(KERN_WARNING "WARNING: %d siblings found for CPU%d, should be %d\n", siblings, cpu, smp_num_siblings);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		check_nmi_watchdog();

	smpboot_setup_io_apic();

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount && cpu_khz)
		synchronize_tsc_bp();
}
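The BogoMIPS line above is fixed-point integer arithmetic on the summed
loops_per_jiffy values: bogosum/(500000/HZ) gives the whole part and
(bogosum/(5000/HZ))%100 the hundredths. A standalone check with invented
calibration numbers, assuming HZ is 100:

#include <stdio.h>

#define HZ 100	/* assumed tick rate; the kernel's is configuration-dependent */

int main(void)
{
	/* Two CPUs, each calibrated to loops_per_jiffy = 24900,
	 * i.e. 24900 * HZ / 500000 = 4.98 BogoMIPS apiece. */
	unsigned long bogosum = 2 * 24900;

	/* Same integer arithmetic as the printk above; prints 9.96. */
	printf("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
	       2, bogosum / (500000 / HZ), (bogosum / (5000 / HZ)) % 100);
	return 0;
}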
Example #11
static void __init smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		printk("huh, phys CPU#%d, CPU#%d already present??\n",
					phys_id, cpuid);
		BUG();
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		rep_nop();
	}

	if (!time_before(jiffies, timeout)) {
		printk("BUG: CPU%d started up but did not get a callout!\n",
			cpuid);
		BUG();
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 */
	calibrate_delay();
	Dprintk("Stack at about %p\n",&cpuid);

	/*
	 * Save our processor parameters
	 */
 	smp_store_cpu_info(cpuid);

	disable_APIC_timer();

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);

	/*
	 * Synchronize the TSC with the BP
	 */
	if (cpu_has_tsc && cpu_khz)
		synchronize_tsc_ap();
}
void __init smp_boot_cpus(void)
{
	extern struct current_set_struct current_set[];
	extern void __secondary_start_chrp(void);
	int i, cpu_nr;
	struct task_struct *p;
	unsigned long sp;

	printk("Entering SMP Mode...\n");
	PPCDBG(PPCDBG_SMP, "smp_boot_cpus: start.  NR_CPUS = 0x%lx\n", NR_CPUS);

	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		paca[i].prof_counter = 1;
		paca[i].prof_multiplier = 1;
		if (i != 0) {
			/*
			 * Processor 0's segment table is statically
			 * initialized to real address STAB0_PHYS_ADDR.  The
			 * other processors' tables are created and
			 * initialized here.
			 */
			paca[i].xStab_data.virt = (unsigned long)&stab_array[PAGE_SIZE * (i-1)];
			memset((void *)paca[i].xStab_data.virt, 0, PAGE_SIZE); 
			paca[i].xStab_data.real = __v2a(paca[i].xStab_data.virt);
			paca[i].default_decr = tb_ticks_per_jiffy / decr_overclock;
		}
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	/* Probe arch for CPUs */
	cpu_nr = ppc_md.smp_probe();

	printk("Probe found %d CPUs\n", cpu_nr);

	/*
	 * only check for cpus we know exist.  We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;

#ifdef CONFIG_PPC_ISERIES
	smp_space_timers(cpu_nr);
#endif

	printk("Waiting for %d CPUs\n", cpu_nr-1);

	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;
		
		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we
		   pass to do_fork.  That is the MSR_FP bit in
		   regs.msr.  If that bit is on, then do_fork
		   (via copy_thread) will call giveup_fpu.
		   giveup_fpu will get a pointer to our (current's)
		   last register savearea via current->thread.regs
		   and using that pointer will turn off the MSR_FP,
		   MSR_FE0 and MSR_FE1 bits.  At this point, this
		   pointer is pointing to some arbitrary point within
		   our stack */

		memset(&regs, 0, sizeof(struct pt_regs));

		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);

		PPCDBG(PPCDBG_SMP,"\tProcessor %d, task = 0x%lx\n", i, p);

		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i].task = p;
		sp = ((unsigned long)p) + sizeof(union task_union)
			- STACK_FRAME_OVERHEAD;
		current_set[i].sp_real = (void *)__v2a(sp);

		/* wake up cpus */
		ppc_md.smp_kick_cpu(i);

		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 5000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			printk("Processor %d found.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d found.\n", i);
			/* this sync's the decr's -- Cort */
			smp_num_cpus++;
		} else {
			printk("Processor %d is stuck.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	ppc_md.smp_setup_cpu(0);
	
	if (smp_num_cpus < 2) {
	        tb_last_stamp = get_tb();
		smp_tb_synchronized = 1;
	}
}
Example #13
static void __init smp_boot_cpus(unsigned int max_cpus)
{
	unsigned apicid, cpu, bit, kicked;

	nmi_watchdog_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk(KERN_INFO "CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);

	current_thread_info()->cpu = 0;
	smp_tune_scheduling();

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		printk("weird, boot CPU (#%d) not listed by the BIOS.\n",
		       hard_smp_processor_id());
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config) {
		printk(KERN_NOTICE "SMP motherboard not detected.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		if (APIC_init_uniprocessor())
			printk(KERN_NOTICE "Local APIC not detected."
					   " Using dummy APIC emulation.\n");
		goto smp_done;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!physid_isset(boot_cpu_id, phys_cpu_present_map)) {
		printk(KERN_NOTICE "weird, boot CPU (#%d) not listed by the BIOS.\n",
								 boot_cpu_id);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_id]) && !cpu_has_apic) {
		printk(KERN_ERR "BIOS bug, local APIC #%d not detected!...\n",
			boot_cpu_id);
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	verify_local_APIC();

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		smp_found_config = 0;
		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
		io_apic_irqs = 0;
		cpu_online_map = cpumask_of_cpu(0);
		cpu_set(0, cpu_sibling_map[0]);
		phys_cpu_present_map = physid_mask_of_physid(0);
		disable_apic = 1;
		goto smp_done;
	}

	connect_bsp_APIC();
	setup_local_APIC();

	if (GET_APIC_ID(apic_read(APIC_ID)) != boot_cpu_id)
		BUG();

	x86_cpu_to_apicid[0] = boot_cpu_id;

	/*
	 * Now scan the CPU present map and fire up the other CPUs.
	 */
	Dprintk("CPU present map: %lx\n", physids_coerce(phys_cpu_present_map));

	kicked = 1;
	for (bit = 0; kicked < NR_CPUS && bit < MAX_APICS; bit++) {
		apicid = cpu_present_to_apicid(bit);
		/*
		 * Don't even attempt to start the boot CPU!
		 */
		if (apicid == boot_cpu_id || (apicid == BAD_APICID))
			continue;

		if (!physid_isset(apicid, phys_cpu_present_map))
			continue;
		if ((max_cpus >= 0) && (max_cpus <= cpucount+1))
			continue;

		do_boot_cpu(apicid);
		++kicked;
	}

	/*
	 * Cleanup possible dangling ends...
	 */
	{
		/*
		 * Install writable page 0 entry to set BIOS data area.
		 */
		local_flush_tlb();

		/*
		 * Paranoid:  Set warm reset code and vector here back
		 * to default values.
		 */
		CMOS_WRITE(0, 0xf);

		*((volatile int *) phys_to_virt(0x467)) = 0;
	}

	/*
	 * Allow the user to impress friends.
	 */

	Dprintk("Before bogomips.\n");
	if (!cpucount) {
		printk(KERN_INFO "Only one processor found.\n");
	} else {
		unsigned long bogosum = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			if (cpu_isset(cpu, cpu_callout_map))
				bogosum += cpu_data[cpu].loops_per_jiffy;
		printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
			cpucount+1,
			bogosum/(500000/HZ),
			(bogosum/(5000/HZ))%100);
		Dprintk("Before bogocount - setting activated=1.\n");
	}

	/*
	 * Construct cpu_sibling_map[], so that we can tell the
	 * sibling CPU efficiently.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		cpus_clear(cpu_sibling_map[cpu]);

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		int siblings = 0;
		int i;
		if (!cpu_isset(cpu, cpu_callout_map))
			continue;

		if (smp_num_siblings > 1) {
			for (i = 0; i < NR_CPUS; i++) {
				if (!cpu_isset(i, cpu_callout_map))
					continue;
				if (phys_proc_id[cpu] == phys_proc_id[i]) {
					siblings++;
					cpu_set(i, cpu_sibling_map[cpu]);
				}
			}
		} else { 
			siblings++;
			cpu_set(cpu, cpu_sibling_map[cpu]);
		}

		if (siblings != smp_num_siblings) {
			printk(KERN_WARNING 
	       "WARNING: %d siblings found for CPU%d, should be %d\n", 
			       siblings, cpu, smp_num_siblings);
			smp_num_siblings = siblings;
		}       
	}

	Dprintk("Boot done.\n");

	/*
	 * Here we can be sure that there is an IO-APIC in the system. Let's
	 * go and set it up:
	 */
	if (!skip_ioapic_setup && nr_ioapics)
		setup_IO_APIC();
	else
		nr_ioapics = 0;

	setup_boot_APIC_clock();

	/*
	 * Synchronize the TSC with the AP
	 */
	if (cpu_has_tsc && cpucount)
		synchronize_tsc_bp();

 smp_done:
	time_init_smp();
}
Example #14
/*
 * This is the secondary CPU boot entry.  We're using this CPU's
 * idle thread stack, but a set of temporary page tables.
 */
asmlinkage void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;

	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	pr_debug("CPU%u: Booted secondary processor\n", cpu);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();

	preempt_disable();
	trace_hardirqs_off();

	/*
	 * If the system has established the capabilities, make sure
	 * this CPU ticks all of those. If it doesn't, the CPU will
	 * fail to come online.
	 */
	verify_local_cpu_capabilities();

	if (cpu_ops[cpu]->cpu_postboot)
		cpu_ops[cpu]->cpu_postboot();

	/*
	 * Log the CPU info before it is marked online and might get read.
	 */
	cpuinfo_store_cpu();

	/*
	 * Enable GIC and timers.
	 */
	notify_cpu_starting(cpu);

	smp_store_cpu_info(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.  Wait for
	 * the CPU migration code to notice that the CPU is online
	 * before we continue.
	 */
	pr_info("CPU%u: Booted secondary processor [%08x]\n",
					 cpu, read_cpuid_id());
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_irq_enable();
	local_async_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_ONLINE);
}