/*
 * Called by the boot processor to activate the rest of the CPUs.
 *
 * Sequence matters here: the secondaries must be fully booted before
 * the ready flag is raised and before they are told to commence.
 */
static void __init smp_init(void)
{
	/* Get other processors into their bootup holding patterns. */
	smp_boot_cpus();

	/*
	 * Mark SMP bring-up as complete. NOTE(review): presumably read by
	 * the secondary CPUs / scheduler paths to know the system is ready
	 * — confirm against the arch smp code.
	 */
	smp_threads_ready=1;

	/* Release the secondaries from their holding pattern. */
	smp_commence();
}
/*
 * Called by the boot processor to activate the rest of the CPUs.
 *
 * After booting the secondaries, this waits until every online CPU
 * (except the boot CPU itself) has checked in by clearing its bit in
 * wait_init_idle. NOTE(review): the bit-clearing presumably happens in
 * each CPU's init_idle() path — confirm against the arch smp code.
 */
static void __init smp_init(void)
{
	/* Get other processors into their bootup holding patterns. */
	smp_boot_cpus();

	/* One bit per online CPU; each secondary clears its own bit. */
	wait_init_idle = cpu_online_map;
	clear_bit(current->processor, &wait_init_idle);	/* Don't wait on me! */

	smp_threads_ready=1;
	smp_commence();

	/* Wait for the other cpus to set up their idle processes */
	printk("Waiting on wait_init_idle (map = 0x%lx)\n", wait_init_idle);
	while (wait_init_idle) {
		/*
		 * cpu_relax() is polite spinning; barrier() forces the
		 * compiler to re-read wait_init_idle (it is not volatile)
		 * on each iteration.
		 */
		cpu_relax();
		barrier();
	}
	printk("All processors have done init_idle\n");
}
/*
 * Boot-time SMP setup: boot the secondary CPUs, then create one idle
 * task per additional logical CPU and take each idle task off the run
 * queue so it never competes for CPU time as a normal task.
 */
static void smp_init(void)
{
	int i, j;

	smp_boot_cpus();

	/*
	 * Create the slave init tasks as sharing pid 0.
	 *
	 * This should only happen if we have virtual CPU numbers
	 * higher than 0.
	 */
	for (i=1; i<smp_num_cpus; i++) {
		struct task_struct *n, *p;

		/* Map the logical CPU index to the physical processor id. */
		j = cpu_logical_map[i];
		/*
		 * We use kernel_thread for the idlers which are
		 * unlocked tasks running in kernel space.
		 *
		 * NOTE(review): this assumes the child created by
		 * kernel_thread() lands in task[i] — confirm against
		 * the task-array allocation in this kernel version.
		 */
		kernel_thread(cpu_idle, NULL, CLONE_PID);
		/*
		 * Don't assume linear processor numbering
		 */
		current_set[j]=task[i];
		current_set[j]->processor=j;

		/*
		 * Unlink the idle task from the run queue with interrupts
		 * disabled: splice its neighbours (n = next, p = prev)
		 * together, drop the runnable count, and leave the idle
		 * task pointing at itself.
		 */
		cli();
		n = task[i]->next_run;
		p = task[i]->prev_run;
		nr_running--;
		n->prev_run = p;
		p->next_run = n;
		task[i]->next_run = task[i]->prev_run = task[i];
		sti();
	}
}
/* These are wrappers to interface to the new boot process. Someone
   who understands all this stuff should rewrite it properly. --RR 15/Jul/02 */

/*
 * Generic hook called from the boot sequence; forwards straight to the
 * architecture-specific smp_boot_cpus(), passing through the cap on how
 * many CPUs may be brought up.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	smp_boot_cpus(max_cpus);
}