/*
 * Early C entry point for the PARISC kernel boot path.
 *
 * Detects whether we are running under QEMU/SeaBIOS, records the boot
 * CPU's coprocessor (FPU) configuration from PDC firmware, and enables
 * the coprocessor if the firmware reports it functional.
 *
 * NOTE(review): this block is truncated in the current view — it ends
 * inside the opening of an else branch; the remainder of the function
 * is not visible here.
 */
void __init start_parisc(void) { extern void early_trap_init(void); int ret, cpunum; struct pdc_coproc_cfg coproc_cfg; /* check QEMU/SeaBIOS marker in PAGE0 */ running_on_qemu = (memcmp(&PAGE0->pad0, "SeaBIOS", 8) == 0); cpunum = smp_processor_id(); /* boot CPU's logical id */ init_cpu_topology(); set_firmware_width_unlocked(); /* query firmware for coprocessor (FPU) configuration */ ret = pdc_coproc_cfg_unlocked(&coproc_cfg); if (ret >= 0 && coproc_cfg.ccr_functional) { /* enable the functional coprocessor set via control register 10 (presumably the CCR — TODO confirm against PDC docs) */ mtctl(coproc_cfg.ccr_functional, 10); /* record FPU revision/model in this CPU's per-cpu data */ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; /* touch the FPU once so its state is initialised */ asm volatile ("fstd %fr0,8(%sp)"); } else {
/*
 * smp_prepare_cpus - mark secondary CPUs present before SMP bringup.
 * @max_cpus: upper bound (including the boot CPU) on CPUs to online.
 *
 * Initialises CPU topology, stores the boot CPU's info, then marks up
 * to (max_cpus - 1) secondaries as present — skipping any CPU without
 * registered cpu_ops or whose cpu_prepare() hook fails.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int total = num_possible_cpus();
	unsigned int boot_cpu = smp_processor_id();
	unsigned int i;
	int err;

	init_cpu_topology();
	smp_store_cpu_info(boot_cpu);

	/* Clamp the request: we cannot boot more cores than exist. */
	if (max_cpus > total)
		max_cpus = total;

	/* Effectively UP — no secondaries to prepare. */
	if (max_cpus <= 1)
		return;

	/*
	 * The boot CPU counts against the limit, so at most
	 * (max_cpus - 1) additional CPUs may be marked present.
	 */
	max_cpus--;
	for_each_possible_cpu(i) {
		if (!max_cpus)
			break;
		if (i == boot_cpu)
			continue;
		/* Secondary needs an enable method to be bootable. */
		if (!cpu_ops[i])
			continue;
		err = cpu_ops[i]->cpu_prepare(i);
		if (err)
			continue;
		set_cpu_present(i, true);
		max_cpus--;
	}
}
/*
 * smp_prepare_cpus - populate the present map ahead of SMP bringup.
 * @max_cpus: number of CPUs the command line allows; 0 means "nosmp".
 *
 * Records topology and NUMA information for the boot CPU, then marks
 * as present every possible secondary whose cpu_ops cpu_prepare()
 * hook succeeds, storing NUMA info for each one as it is accepted.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int boot_cpu;
	unsigned int cpu;
	int err;

	init_cpu_topology();

	boot_cpu = smp_processor_id();
	store_cpu_topology(boot_cpu);
	numa_store_cpu_info(boot_cpu);

	/*
	 * "nosmp" implies "maxcpus=0": leave every secondary CPU out of
	 * the present map.
	 */
	if (!max_cpus)
		return;

	/*
	 * Walk all possible CPUs; a secondary becomes present only when
	 * it has registered cpu_ops and its prepare hook succeeds.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu == boot_cpu)
			continue;
		if (!cpu_ops[cpu])
			continue;
		err = cpu_ops[cpu]->cpu_prepare(cpu);
		if (err)
			continue;
		set_cpu_present(cpu, true);
		numa_store_cpu_info(cpu);
	}
}
/*
 * Early C entry point for the PARISC kernel boot path.
 *
 * Records the boot CPU's coprocessor (FPU) configuration from PDC
 * firmware and enables the coprocessor if firmware reports it
 * functional.
 *
 * NOTE(review): unlike the other start_parisc variant visible in this
 * file, this one does not probe the PAGE0 SeaBIOS marker. Also, this
 * block is truncated in the current view — it ends inside the opening
 * of an else branch.
 */
void __init start_parisc(void) { extern void early_trap_init(void); int ret, cpunum; struct pdc_coproc_cfg coproc_cfg; cpunum = smp_processor_id(); /* boot CPU's logical id */ init_cpu_topology(); set_firmware_width_unlocked(); /* query firmware for coprocessor (FPU) configuration */ ret = pdc_coproc_cfg_unlocked(&coproc_cfg); if (ret >= 0 && coproc_cfg.ccr_functional) { /* enable the functional coprocessor set via control register 10 (presumably the CCR — TODO confirm against PDC docs) */ mtctl(coproc_cfg.ccr_functional, 10); /* record FPU revision/model in this CPU's per-cpu data */ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; /* touch the FPU once so its state is initialised */ asm volatile ("fstd %fr0,8(%sp)"); } else {
/*
 * smp_prepare_cpus - set up the present map before booting secondaries.
 * @max_cpus: upper bound (including the boot CPU) on CPUs to online.
 *
 * Stores boot-CPU info, caps @max_cpus at the possible-CPU count, and
 * then flags at most (max_cpus - 1) secondaries as present, skipping
 * CPUs without cpu_ops or whose cpu_prepare() hook reports an error.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int self;
	unsigned int cpu;
	unsigned int ncores;
	int ret;

	init_cpu_topology();

	self = smp_processor_id();
	smp_store_cpu_info(self);

	ncores = num_possible_cpus();
	if (max_cpus > ncores)
		max_cpus = ncores;	/* cannot exceed what exists */

	if (max_cpus <= 1)
		return;			/* effectively UP: nothing to do */

	/* The boot CPU takes one slot, leaving (max_cpus - 1) for others. */
	max_cpus--;

	for_each_possible_cpu(cpu) {
		if (max_cpus == 0)
			break;
		if (cpu == self)
			continue;
		if (!cpu_ops[cpu])
			continue;
		ret = cpu_ops[cpu]->cpu_prepare(cpu);
		if (ret)
			continue;
		set_cpu_present(cpu, true);
		max_cpus--;
	}
}