/*
 * Called once for each "cpu_possible(cpu)".  Needs to spin up the cpu
 * and keep control until "cpu_online(cpu)" is set.  Note: cpu is
 * physical, not logical.
 */
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;

        /*
         * Processor goes to start_secondary(), sets online flag.
         * The following code is purely to make sure
         * Linux can schedule processes on this slave.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("Fork failed for CPU %d", cpu);    /* panic() adds its own loglevel; no KERN_ERR */

        prom_boot_secondary(cpu, idle);

        /*
         * Trust is futile.  We should really have timeouts ...
         */
        while (!cpu_isset(cpu, cpu_callin_map))
                udelay(100);

        cpu_set(cpu, cpu_online_map);

        return 0;
}
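/*
 * The comment above concedes the callin wait should have a timeout.  A
 * minimal sketch of what a bounded wait could look like, reusing this
 * snippet's cpu_callin_map/prom_boot_secondary names and the jiffies
 * pattern the SH port below uses.  The function name is hypothetical;
 * this is not the actual MIPS code.
 */
int __cpuinit mips_cpu_up_with_timeout(unsigned int cpu)
{
        struct task_struct *idle;
        unsigned long timeout;

        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                return PTR_ERR(idle);

        prom_boot_secondary(cpu, idle);

        /* Give the secondary one second to report in, then give up. */
        timeout = jiffies + HZ;
        while (!cpu_isset(cpu, cpu_callin_map)) {
                if (time_after(jiffies, timeout))
                        return -ENOENT; /* secondary never called in */
                udelay(100);
        }

        cpu_set(cpu, cpu_online_map);
        return 0;
}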
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;
        unsigned long timeout;

        tsk = fork_idle(cpu);
        if (IS_ERR(tsk)) {
                printk(KERN_ERR "Failed forking idle task for cpu %d\n", cpu);
                return PTR_ERR(tsk);
        }

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_cache_all();

        plat_start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}
/* Bring one cpu online. */
static int __init smp_boot_one_cpu(int cpuid)
{
        unsigned timeout;
        struct task_struct *idle;

        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);

        task_thread_info(idle)->cpu = cpuid;

        /* Information to the CPU that is about to boot */
        smp_init_current_idle_thread = task_thread_info(idle);
        cpu_now_booting = cpuid;

        /* Wait for CPU to come online */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_online(cpuid)) {
                        cpu_now_booting = 0;
                        smp_init_current_idle_thread = NULL;
                        return 0; /* CPU online */
                }

                udelay(100);
                barrier();
        }

        put_task_struct(idle);
        idle = NULL;

        printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
        return -1;
}
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;

        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        xen_cpu_initialized_map = cpumask_of_cpu(0);

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = NR_CPUS - 1; !cpu_possible(cpu); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }

        for_each_possible_cpu (cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                cpu_set(cpu, cpu_present_map);
        }
}
void do_fork_idle(void *_c_idle)
{
        struct create_idle *c_idle = _c_idle;

        c_idle->idle = fork_idle(c_idle->cpu);
        complete(&c_idle->done);
}
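/*
 * A sketch of how do_fork_idle() above is typically driven: the caller
 * packages the target cpu and a completion in a create_idle descriptor,
 * hands the fork off to keventd via the old data-pointer workqueue API
 * (consistent with the void * signature above) so the idle task is
 * created in a clean kernel-thread context, and blocks until the worker
 * signals completion.  The struct layout and caller are reconstructed
 * assumptions, not verbatim source.
 */
struct create_idle {
        struct task_struct *idle;
        struct completion done;
        int cpu;
};

static struct task_struct *fork_idle_on_keventd(int cpu)
{
        struct create_idle c_idle = {
                .cpu  = cpu,
                .done = COMPLETION_INITIALIZER(c_idle.done),
        };
        DECLARE_WORK(work, do_fork_idle, &c_idle);

        schedule_work(&work);           /* fork in keventd's context */
        wait_for_completion(&c_idle.done);

        return c_idle.idle;             /* task_struct or ERR_PTR */
}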
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; "
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }

        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu (cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                set_cpu_present(cpu, true);
        }
}
static void __init smp_create_idle(unsigned int cpu)
{
        struct task_struct *p;

        /* create a process for the processor */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
#ifdef CONFIG_PPC64
        paca[cpu].__current = p;
#endif
        current_set[cpu] = task_thread_info(p);
        task_thread_info(p)->cpu = cpu;
}
int __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;

        tsk = fork_idle(cpu);
        if (IS_ERR(tsk))
                panic("Failed forking idle task for cpu %d\n", cpu);

        task_thread_info(tsk)->cpu = cpu;

        cpu_set(cpu, cpu_online_map);

        return 0;
}
int __cpu_up(unsigned int cpu)
{
        struct task_struct *p;
        char buf[32];
        int c;

        /* create a process for the processor */
        /* only regs.msr is actually used, and 0 is OK for it */
        p = fork_idle(cpu);
        if (IS_ERR(p))
                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
        secondary_ti = p->thread_info;
        p->thread_info->cpu = cpu;

        /*
         * There was a cache flush loop here to flush the cache
         * to memory for the first 8MB of RAM.  The cache flush
         * has been pushed into the kick_cpu function for those
         * platforms that need it.
         */

        /* wake up cpu */
        smp_ops->kick_cpu(cpu);

        /*
         * wait to see if the cpu made a callin (is actually up).
         * use this value that I found through experimentation.
         * -- Cort
         */
        for (c = 1000; c && !cpu_callin_map[cpu]; c--)
                udelay(100);

        if (!cpu_callin_map[cpu]) {
                sprintf(buf, "didn't find cpu %u", cpu);
                if (ppc_md.progress)
                        ppc_md.progress(buf, 0x360 + cpu);
                printk("Processor %u is stuck.\n", cpu);
                return -ENOENT;
        }

        sprintf(buf, "found cpu %u", cpu);
        if (ppc_md.progress)
                ppc_md.progress(buf, 0x350 + cpu);
        printk("Processor %d found.\n", cpu);

        smp_ops->give_timebase();
        cpu_set(cpu, cpu_online_map);
        return 0;
}
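/*
 * None of the snippets in this collection show the slave side that the
 * callin loops above poll on.  A generic sketch with assumed names: the
 * secondary lands in its start_secondary() entry point, initializes
 * itself, reports in via the callin map (ending the master's wait),
 * and finally marks itself online.  The exact helpers vary per port;
 * treat every call below as illustrative, not verbatim source.
 */
asmlinkage void __init start_secondary(void)
{
        unsigned int cpu = smp_processor_id();

        cpu_init();                     /* per-cpu state, caches, MMU */
        calibrate_delay();              /* per-cpu loops_per_jiffy */

        cpu_set(cpu, cpu_callin_map);   /* tell the master we got here */
        local_irq_enable();

        cpu_set(cpu, cpu_online_map);   /* now visible to the scheduler */
        cpu_idle();                     /* become this cpu's idle loop */
}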
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu (cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                set_cpu_present(cpu, true);
        }
}
int __cpuinit __cpu_up(unsigned int cpu)
{
        struct task_struct *tsk;
        unsigned long timeout;

        tsk = cpu_data[cpu].idle;
        if (!tsk) {
                tsk = fork_idle(cpu);
                if (IS_ERR(tsk)) {
                        pr_err("Failed forking idle task for cpu %d\n", cpu);
                        return PTR_ERR(tsk);
                }

                cpu_data[cpu].idle = tsk;
        }

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* Fill in data in head.S for secondary cpus */
        stack_start.sp = tsk->thread.sp;
        stack_start.thread_info = tsk->stack;
        stack_start.bss_start = 0; /* don't clear bss for secondary cpus */
        stack_start.start_kernel_fn = start_secondary;

        flush_icache_range((unsigned long)&stack_start,
                           (unsigned long)&stack_start + sizeof(stack_start));
        wmb();

        mp_ops->start_cpu(cpu, (unsigned long)_stext);

        timeout = jiffies + HZ;
        while (time_before(jiffies, timeout)) {
                if (cpu_online(cpu))
                        break;

                udelay(10);
                barrier();
        }

        if (cpu_online(cpu))
                return 0;

        return -ENOENT;
}
void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;

        for_each_possible_cpu(cpu) {
                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                /*
                 * cpu_core_map will be zeroed when the per
                 * cpu area is allocated.
                 *
                 * cpus_clear(per_cpu(cpu_core_map, cpu));
                 */
        }

        smp_store_cpu_info(0);
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        cpu_initialized_map = cpumask_of_cpu(0);

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = NR_CPUS - 1; !cpu_isset(cpu, cpu_possible_map); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }

        for_each_possible_cpu (cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                cpu_set(cpu, cpu_present_map);
        }

        /* init_xenbus_allowed_cpumask(); */
}
static void __init do_boot_cpu(int apicid)
{
        struct task_struct *idle;
        unsigned long boot_error;
        int timeout, cpu;
        unsigned long start_rip;

        cpu = ++cpucount;
        /*
         * We can't use kernel_thread since we must avoid to
         * reschedule the child.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);
        x86_cpu_to_apicid[cpu] = apicid;

        cpu_pda[cpu].pcurrent = idle;

        start_rip = setup_trampoline();

        init_rsp = idle->thread.rsp;
        per_cpu(init_tss, cpu).rsp0 = init_rsp;
        initial_code = start_secondary;
        clear_ti_thread_flag(idle->thread_info, TIF_FORK);

        printk(KERN_INFO "Booting processor %d/%d rip %lx rsp %lx\n",
               cpu, apicid, start_rip, init_rsp);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

        CMOS_WRITE(0xa, 0xf);
        local_flush_tlb();
        Dprintk("1.\n");
        *((volatile unsigned short *)phys_to_virt(0x469)) = start_rip >> 4;
        Dprintk("2.\n");
        *((volatile unsigned short *)phys_to_virt(0x467)) = start_rip & 0xf;
        Dprintk("3.\n");

        /*
         * Be paranoid about clearing APIC errors.
         */
        if (APIC_INTEGRATED(apic_version[apicid])) {
                apic_read_around(APIC_SPIV);
                apic_write(APIC_ESR, 0);
                apic_read(APIC_ESR);
        }

        /*
         * Status is now clean
         */
        boot_error = 0;

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_via_INIT(apicid, start_rip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        print_cpu_info(&cpu_data[cpu]);
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)phys_to_virt(SMP_TRAMPOLINE_BASE)) == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
#if APIC_DEBUG
                        inquire_remote_apic(apicid);
#endif
                }
        }
        if (boot_error) {
                cpu_clear(cpu, cpu_callout_map);  /* was set here (do_boot_cpu()) */
                clear_bit(cpu, &cpu_initialized); /* was set by cpu_init() */
                cpucount--;
                x86_cpu_to_apicid[cpu] = BAD_APICID;
                x86_cpu_to_log_apicid[cpu] = BAD_APICID;
        }
}
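/*
 * wakeup_secondary_via_INIT() above is not shown in this collection.
 * The classic MP-spec sequence it implements is sketched below in
 * condensed form; the function name is hypothetical, and the exact
 * register dance (write_around variants, ESR handling, delays) varies
 * by kernel version.  The AP is pulled out of reset with an
 * asserted/deasserted INIT IPI, then sent two STARTUP IPIs whose
 * vector encodes the page number of the real-mode trampoline.
 */
static unsigned long wakeup_via_init_sipi_sipi(int apicid, unsigned long start_rip)
{
        int i;

        /* INIT assert: put the AP into wait-for-SIPI state */
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT);
        mdelay(10);

        /* INIT deassert */
        apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
        apic_write(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

        /* Two STARTUP IPIs; vector = trampoline page (start_rip >> 12) */
        for (i = 0; i < 2; i++) {
                apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
                apic_write(APIC_ICR, APIC_DM_STARTUP | (start_rip >> 12));
                udelay(300);
        }

        return apic_read(APIC_ESR) & 0xEF;      /* nonzero => accept error */
}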
/*==========================================================================*
 * Name:         do_boot_cpu
 *
 * Description:  This routine boots up one AP.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    phys_id - Target CPU physical ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  modify for linux-2.5.69
 *
 *==========================================================================*/
static void __init do_boot_cpu(int phys_id)
{
        struct task_struct *idle;
        unsigned long send_status, boot_status;
        int timeout, cpu_id;

        cpu_id = ++cpucount;

        /*
         * We can't use kernel_thread since we must avoid to
         * reschedule the child.
         */
        idle = fork_idle(cpu_id);
        if (IS_ERR(idle))
                panic("failed fork for CPU#%d.", cpu_id);

        idle->thread.lr = (unsigned long)start_secondary;

        map_cpu_to_physid(cpu_id, phys_id);

        /* So we see what's up */
        printk("Booting processor %d/%d\n", phys_id, cpu_id);
        stack_start.spi = (void *)idle->thread.sp;
        task_thread_info(idle)->cpu = cpu_id;

        /*
         * Send Startup IPI
         *   1. IPI received by CPU#(phys_id).
         *   2. CPU#(phys_id) enter startup_AP (arch/m32r/kernel/head.S)
         *   3. CPU#(phys_id) enter start_secondary()
         */
        send_status = 0;
        boot_status = 0;

        cpu_set(phys_id, cpu_bootout_map);

        /* Send Startup IPI */
        send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0);

        Dprintk("Waiting for send to finish...\n");
        timeout = 0;

        /* Wait 100[ms] */
        do {
                Dprintk("+");
                udelay(1000);
                send_status = !cpu_isset(phys_id, cpu_bootin_map);
        } while (send_status && (timeout++ < 100));

        Dprintk("After Startup.\n");

        if (!send_status) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu_id);
                cpu_set(cpu_id, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu_id);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 5000; timeout++) {
                        if (cpu_isset(cpu_id, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(1000);
                }

                if (cpu_isset(cpu_id, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                } else {
                        boot_status = 1;
                        printk("Not responding.\n");
                }
        } else
                printk("IPI never delivered???\n");

        if (send_status || boot_status) {
                unmap_cpu_to_physid(cpu_id, phys_id);
                cpu_clear(cpu_id, cpu_callout_map);
                cpu_clear(cpu_id, cpu_callin_map);
                cpu_clear(cpu_id, cpu_initialized);
                cpucount--;
        }
}
/*
 * Bring one cpu online.
 */
int smp_boot_one_cpu(int cpuid)
{
        const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
        struct task_struct *idle;
        long timeout;

        /*
         * Create an idle task for this CPU.  Note the address we'd give
         * to kernel_thread is irrelevant -- it's going to start
         * where OS_BOOT_RENDEVZ vector in SAL says to start.  But
         * this gets all the other task-y sort of data structures set
         * up like we wish.   We need to pull the just created idle task
         * off the run queue and stuff it into the init_tasks[] array.
         * Sheesh . . .
         */
        idle = fork_idle(cpuid);
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);

        task_thread_info(idle)->cpu = cpuid;

        /* Let _start know what logical CPU we're booting
        ** (offset into init_tasks[], cpu_data[])
        */
        cpu_now_booting = cpuid;

        /*
        ** boot strap code needs to know the task address since
        ** it also contains the process stack.
        */
        smp_init_current_idle_task = idle;
        mb();

        printk(KERN_INFO "Releasing cpu %d now, hpa=%lx\n", cpuid, p->hpa);

        /*
        ** This gets PDC to release the CPU from a very tight loop.
        **
        ** From the PA-RISC 2.0 Firmware Architecture Reference Specification:
        ** "The MEM_RENDEZ vector specifies the location of OS_RENDEZ which
        ** is executed after receiving the rendezvous signal (an interrupt to
        ** EIR{0}).  MEM_RENDEZ is valid only when it is nonzero and the
        ** contents of memory are valid."
        */
        gsc_writel(TIMER_IRQ - CPU_IRQ_BASE, p->hpa);
        mb();

        /*
         * OK, wait a bit for that CPU to finish staggering about.
         * Slave will set a bit when it reaches smp_cpu_init().
         * Once the "monarch CPU" sees the bit change, it can move on.
         */
        for (timeout = 0; timeout < 10000; timeout++) {
                if (cpu_online(cpuid)) {
                        /* Which implies Slave has started up */
                        cpu_now_booting = 0;
                        smp_init_current_idle_task = NULL;
                        goto alive;
                }
                udelay(100);
                barrier();
        }

        put_task_struct(idle);
        idle = NULL;

        printk(KERN_CRIT "SMP: CPU:%d is stuck.\n", cpuid);
        return -1;

alive:
        /* Remember the Slave data */
        smp_debug(100, KERN_DEBUG "SMP: CPU:%d came alive after %ld _us\n",
                  cpuid, timeout * 100);
        return 0;
}
static int __init do_boot_cpu(int apicid)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
        struct task_struct *idle;
        unsigned long boot_error;
        int timeout, cpu;
        unsigned long start_eip;
        unsigned short nmi_high = 0, nmi_low = 0;

        cpu = ++cpucount;
        /*
         * We can't use kernel_thread since we must avoid to
         * reschedule the child.
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle))
                panic("failed fork for CPU %d", cpu);
        idle->thread.eip = (unsigned long) start_secondary;
        /* start_eip had better be page-aligned! */
        start_eip = setup_trampoline();

        /* So we see what's up */
        printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
        /* Stack for startup_32 can be just as for start_secondary onwards */
        stack_start.esp = (void *) idle->thread.esp;

        irq_ctx_init(cpu);

        /*
         * This grunge runs the startup process for
         * the targeted processor.
         */

        atomic_set(&init_deasserted, 0);

        Dprintk("Setting warm reset code and vector.\n");

        store_NMI_vector(&nmi_high, &nmi_low);

        smpboot_setup_warm_reset_vector(start_eip);

        /*
         * Starting actual IPI sequence...
         */
        boot_error = wakeup_secondary_cpu(apicid, start_eip);

        if (!boot_error) {
                /*
                 * allow APs to start initializing.
                 */
                Dprintk("Before Callout %d.\n", cpu);
                cpu_set(cpu, cpu_callout_map);
                Dprintk("After Callout %d.\n", cpu);

                /*
                 * Wait 5s total for a response
                 */
                for (timeout = 0; timeout < 50000; timeout++) {
                        if (cpu_isset(cpu, cpu_callin_map))
                                break;  /* It has booted */
                        udelay(100);
                }

                if (cpu_isset(cpu, cpu_callin_map)) {
                        /* number CPUs logically, starting from 1 (BSP is 0) */
                        Dprintk("OK.\n");
                        printk("CPU%d: ", cpu);
                        print_cpu_info(&cpu_data[cpu]);
                        Dprintk("CPU has booted.\n");
                } else {
                        boot_error = 1;
                        if (*((volatile unsigned char *)trampoline_base) == 0xA5)
                                /* trampoline started but...? */
                                printk("Stuck ??\n");
                        else
                                /* trampoline code not run */
                                printk("Not responding.\n");
                        inquire_remote_apic(apicid);
                }
        }
        x86_cpu_to_apicid[cpu] = apicid;
        if (boot_error) {
                /* Try to put things back the way they were before ... */
                unmap_cpu_to_logical_apicid(cpu);
                cpu_clear(cpu, cpu_callout_map); /* was set here (do_boot_cpu()) */
                cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
                cpucount--;
        }

        /* mark "stuck" area as not stuck */
        *((volatile unsigned long *)trampoline_base) = 0;

        return boot_error;
}
/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        long rc;
        int cpu, cpu_count;
        int boot_cpu = smp_processor_id();

        current_thread_info()->cpu = boot_cpu;

        /*
         * Pin this task to the boot CPU while we bring up the others,
         * just to make sure we don't uselessly migrate as they come up.
         */
        rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
        if (rc != 0)
                pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

        /* Print information about disabled and dataplane cpus. */
        print_disabled_cpus();

        /*
         * Tell the messaging subsystem how to respond to the
         * startup message.  We use a level of indirection to avoid
         * confusing the linker with the fact that the messaging
         * subsystem is calling __init code.
         */
        start_cpu_function_addr = (unsigned long) &online_secondary;

        /* Set up thread context for all new processors. */
        cpu_count = 1;
        for (cpu = 0; cpu < NR_CPUS; ++cpu) {
                struct task_struct *idle;

                if (cpu == boot_cpu)
                        continue;

                if (!cpu_possible(cpu)) {
                        /*
                         * Make this processor do nothing on boot.
                         * Note that we don't give the boot_pc function
                         * a stack, so it has to be assembly code.
                         */
                        per_cpu(boot_sp, cpu) = 0;
                        per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
                        continue;
                }

                /* Create a new idle thread to run start_secondary() */
                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);
                idle->thread.pc = (unsigned long) start_secondary;

                /* Make this thread the boot thread for this processor */
                per_cpu(boot_sp, cpu) = task_ksp0(idle);
                per_cpu(boot_pc, cpu) = idle->thread.pc;

                ++cpu_count;
        }
        BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

        /* Fire up the other tiles, if any */
        init_cpu_present(cpu_possible_mask);
        if (cpumask_weight(cpu_present_mask) > 1) {
                mb();   /* make sure all data is visible to new processors */
                hv_start_all_tiles();
        }
}