/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 *
 * Returns 0 on success; panics if the idle task cannot be forked.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * Processor goes to start_secondary(), sets online flag
	 * The following code is purely to make sure
	 * Linux can schedule processes on this slave.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		/* panic() takes a plain format string; no KERN_* level here */
		panic("Fork failed for CPU %d", cpu);

	/* Hand the idle task to firmware so it can start the slave CPU. */
	prom_boot_secondary(cpu, idle);

	/*
	 * Trust is futile.  We should really have timeouts ...
	 */
	while (!cpu_isset(cpu, cpu_callin_map))
		udelay(100);

	cpu_set(cpu, cpu_online_map);

	return 0;
}
/*
 * Boot every secondary CPU.  The master CPU (0) is already running;
 * for each remaining CPU we fork a task, detach it from the runqueue
 * so only the slave will ever run it, point it at init_mm, and ask
 * the firmware to start the slave on its kernel stack.
 */
void __init smp_boot_cpus(void)
{
	int i;

	smp_num_cpus = prom_setup_smp();
	init_new_context(current, &init_mm);
	current->processor = 0;
	atomic_set(&cpus_booted, 1);	/* Master CPU is already booted... */
	init_idle();

	for (i = 1; i < smp_num_cpus; i++) {
		struct task_struct *p;
		struct pt_regs regs;

		printk("Starting CPU %d... ", i);

		/* Spawn a new process normally.  Grab a pointer to
		   its task struct so we can mess with it */
		do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);

		p = init_task.prev_task;

		/* Schedule the first task manually */
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */

		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		init_tasks[i] = p;

		/* Keep the task off the runqueue/pid hash: only the slave
		   CPU may ever run it. */
		del_from_runqueue(p);
		unhash_process(p);

		prom_boot_secondary(i,
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);
#if 0
		/* This is copied from the ip-27 code in the mips64 tree */
		struct task_struct *p;

		/*
		 * The following code is purely to make sure
		 * Linux can schedule processes on this slave.
		 */
		kernel_thread(0, NULL, CLONE_PID);
		p = init_task.prev_task;
		sprintf(p->comm, "%s%d", "Idle", i);
		init_tasks[i] = p;
		p->processor = i;
		p->cpus_runnable = 1 << i; /* we schedule the first task manually */
		del_from_runqueue(p);
		unhash_process(p);
		/* Attach to the address space of init_task. */
		atomic_inc(&init_mm.mm_count);
		p->active_mm = &init_mm;
		prom_boot_secondary(i,
				    (unsigned long)p + KERNEL_STACK_SIZE - 32,
				    (unsigned long)p);
#endif
	}

	/* Wait for everyone to come up */
	while (atomic_read(&cpus_booted) != smp_num_cpus)
		;
}