/*
 * Prepare for SMP boot: record the boot cpu's info, ask the platform
 * how many cpus may come up, and spawn an idle thread for every other
 * possible cpu.  Runs on the boot cpu only, before secondaries start.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We havent
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	/* Without platform smp_ops we can only ever run the boot cpu. */
	max_cpus = smp_ops ? smp_ops->probe() : 1;

	smp_space_timers(max_cpus);

	for_each_possible_cpu(i) {
		if (i != boot_cpuid)
			smp_create_idle(i);
	}
}
/*
 * Per-cpu setup for CHRP machines: synchronize the timebase across
 * cpus, then wire up the interrupt controller for this cpu.
 *
 * Under a pSeries LPAR the hypervisor has already synchronized the
 * timebases, so we only record the current value.  On bare metal the
 * boot cpu (cpu_nr == 0) and the secondaries run a two-phase
 * handshake through RTAS: freeze all timebases, zero them everywhere,
 * thaw.  The exact ordering of the atomic counter, the volatile
 * `frozen` flag, set_tb() and the barriers IS the protocol — do not
 * reorder.
 *
 * NOTE(review): `ready` starts at 1 because the boot cpu counts
 * itself; each secondary increments it once per phase — presumably
 * every secondary reaches this code, verify against the caller.
 */
static void smp_chrp_setup_cpu(int cpu_nr)
{
	static atomic_t ready = ATOMIC_INIT(1);
	static volatile int frozen = 0;

	if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
		/* timebases already synced under the hypervisor. */
		paca[cpu_nr].next_jiffy_update_tb = tb_last_stamp = get_tb();
		if (cpu_nr == 0) {
			systemcfg->tb_orig_stamp = tb_last_stamp;
			/* Should update naca->stamp_xsec.
			 * For now we leave it which means the time can be some
			 * number of msecs off until someone does a settimeofday()
			 */
		}
		smp_tb_synchronized = 1;
	} else {
		if (cpu_nr == 0) {
			/* wait for all the others */
			while (atomic_read(&ready) < smp_num_cpus)
				barrier();
			/* reset the counter for the second rendezvous below */
			atomic_set(&ready, 1);
			/* freeze the timebase */
			rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
			mb();
			/* release the secondaries spinning on !frozen */
			frozen = 1;
			/* all timebases are stopped; zero ours */
			set_tb(0, 0);
			paca[0].next_jiffy_update_tb = 0;
			smp_space_timers(smp_num_cpus);
			/* wait until every secondary has zeroed its tb too */
			while (atomic_read(&ready) < smp_num_cpus)
				barrier();
			/* thaw the timebase again */
			rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
			mb();
			/* release the secondaries spinning on frozen */
			frozen = 0;
			tb_last_stamp = get_tb();
			systemcfg->tb_orig_stamp = tb_last_stamp;
			smp_tb_synchronized = 1;
		} else {
			/* phase 1: report in, then wait for the freeze */
			atomic_inc(&ready);
			while (!frozen)
				barrier();
			set_tb(0, 0);
			mb();
			/* phase 2: report the zeroed tb, wait for the thaw */
			atomic_inc(&ready);
			while (frozen)
				barrier();
		}
	}

	/* Interrupt controller: OpenPIC if present, otherwise XICS
	 * (cpu 0's XICS setup happens elsewhere — only secondaries here). */
	if (OpenPIC_Addr) {
		do_openpic_setup_cpu();
	} else {
		if (cpu_nr > 0)
			xics_setup_cpu();
	}
}
/*
 * Prepare for SMP boot: record the boot cpu's info, allocate the
 * per-cpu sibling/core masks, ask the platform how many cpus may come
 * up, and spawn an idle thread for every other possible cpu.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	DBG("smp_prepare_cpus\n");

	/*
	 * setup_cpu may need to be called on the boot cpu. We havent
	 * spun any cpus up but lets be paranoid.
	 */
	BUG_ON(boot_cpuid != smp_processor_id());

	/* Fixup boot cpu */
	smp_store_cpu_info(boot_cpuid);
	cpu_callin_map[boot_cpuid] = 1;

	/* Topology masks, allocated on each cpu's own node. */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, i),
					GFP_KERNEL, cpu_to_node(i));
		zalloc_cpumask_var_node(&per_cpu(cpu_core_map, i),
					GFP_KERNEL, cpu_to_node(i));
	}

	/* The boot cpu is trivially its own sibling and core peer. */
	cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
	cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid));

	if (!smp_ops)
		max_cpus = 1;
	else if (smp_ops->probe)
		max_cpus = smp_ops->probe();
	else
		max_cpus = NR_CPUS;

	smp_space_timers(max_cpus);

	for_each_possible_cpu(i) {
		if (i != boot_cpuid)
			smp_create_idle(i);
	}
}
/*
 * Bring the secondary cpus online: initialize per-cpu paca data,
 * probe the platform for the cpu count, fork an idle task for each
 * secondary, kick it, and wait for it to call in.  Runs once on the
 * boot cpu.
 *
 * Fix: the source had `®s` (an HTML-entity/encoding corruption of
 * `&regs`) in the memset() and do_fork() calls below; restored to
 * `&regs` so the file compiles again.
 */
void __init smp_boot_cpus(void)
{
	extern struct current_set_struct current_set[];
	extern void __secondary_start_chrp(void);
	int i, cpu_nr;
	struct task_struct *p;
	unsigned long sp;

	printk("Entering SMP Mode...\n");
	PPCDBG(PPCDBG_SMP, "smp_boot_cpus: start. NR_CPUS = 0x%lx\n", NR_CPUS);

	smp_num_cpus = 1;
	smp_store_cpu_info(0);
	cpu_online_map = 1UL;

	/*
	 * assume for now that the first cpu booted is
	 * cpu 0, the master -- Cort
	 */
	cpu_callin_map[0] = 1;
	current->processor = 0;

	init_idle();

	for (i = 0; i < NR_CPUS; i++) {
		paca[i].prof_counter = 1;
		paca[i].prof_multiplier = 1;
		if (i != 0) {
			/*
			 * Processor 0's segment table is statically
			 * initialized to real address STAB0_PHYS_ADDR. The
			 * Other processor's tables are created and
			 * initialized here.
			 */
			paca[i].xStab_data.virt =
				(unsigned long)&stab_array[PAGE_SIZE * (i-1)];
			memset((void *)paca[i].xStab_data.virt, 0, PAGE_SIZE);
			paca[i].xStab_data.real = __v2a(paca[i].xStab_data.virt);
			paca[i].default_decr = tb_ticks_per_jiffy / decr_overclock;
		}
	}

	/*
	 * XXX very rough, assumes 20 bus cycles to read a cache line,
	 * timebase increments every 4 bus cycles, 32kB L1 data cache.
	 */
	cacheflush_time = 5 * 1024;

	/* Probe arch for CPUs */
	cpu_nr = ppc_md.smp_probe();
	printk("Probe found %d CPUs\n", cpu_nr);

	/*
	 * only check for cpus we know exist. We keep the callin map
	 * with cpus at the bottom -- Cort
	 */
	if (cpu_nr > max_cpus)
		cpu_nr = max_cpus;

#ifdef CONFIG_PPC_ISERIES
	smp_space_timers(cpu_nr);
#endif

	printk("Waiting for %d CPUs\n", cpu_nr - 1);

	for (i = 1; i < cpu_nr; i++) {
		int c;
		struct pt_regs regs;

		/* create a process for the processor */
		/* we don't care about the values in regs since we'll
		   never reschedule the forked task. */
		/* We DO care about one bit in the pt_regs we pass to
		   do_fork.  That is the MSR_FP bit in regs.msr.  If that
		   bit is on, then do_fork (via copy_thread) will call
		   giveup_fpu.  giveup_fpu will get a pointer to our
		   (current's) last register savearea via
		   current->thread.regs and using that pointer will turn
		   off the MSR_FP, MSR_FE0 and MSR_FE1 bits.  At this
		   point, this pointer is pointing to some arbitrary
		   point within our stack */
		memset(&regs, 0, sizeof(struct pt_regs));

		if (do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0) < 0)
			panic("failed fork for CPU %d", i);
		p = init_task.prev_task;
		if (!p)
			panic("No idle task for CPU %d", i);

		PPCDBG(PPCDBG_SMP, "\tProcessor %d, task = 0x%lx\n", i, p);

		/* The forked task becomes the cpu's idle task, never
		 * scheduled normally — take it off the runqueue and
		 * out of the pid hash. */
		del_from_runqueue(p);
		unhash_process(p);
		init_tasks[i] = p;

		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		current_set[i].task = p;
		sp = ((unsigned long)p) + sizeof(union task_union)
			- STACK_FRAME_OVERHEAD;
		current_set[i].sp_real = (void *)__v2a(sp);

		/* wake up cpus */
		ppc_md.smp_kick_cpu(i);

		/*
		 * wait to see if the cpu made a callin (is actually up).
		 * use this value that I found through experimentation.
		 * -- Cort
		 */
		for (c = 5000; c && !cpu_callin_map[i]; c--)
			udelay(100);

		if (cpu_callin_map[i]) {
			printk("Processor %d found.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d found.\n", i);
			/* this sync's the decr's -- Cort */
			smp_num_cpus++;
		} else {
			printk("Processor %d is stuck.\n", i);
			PPCDBG(PPCDBG_SMP, "\tProcessor %d is stuck.\n", i);
		}
	}

	/* Setup CPU 0 last (important) */
	ppc_md.smp_setup_cpu(0);

	/* Uniprocessor after all: the timebase is trivially "synced". */
	if (smp_num_cpus < 2) {
		tb_last_stamp = get_tb();
		smp_tb_synchronized = 1;
	}
}