static void __cpuinit cpu_bringup(void)
{
	int cpu;

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	xen_enable_sysenter();
	xen_enable_syscall();

	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	notify_cpu_starting(cpu);

	ipi_call_lock();
	set_cpu_online(cpu, true);
	ipi_call_unlock();

	this_cpu_write(cpu_state, CPU_ONLINE);

	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	cpu_set(cpu, cpu_online_map);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();
	return 0;
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();

	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */

	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();

	notify_cpu_starting(smp_processor_id());
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();

	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */

	/*
	 * Wait until the cpu which brought this one up marked it
	 * active before enabling interrupts.
	 */
	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
		cpu_relax();

	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	notify_cpu_starting(cpu);
	cpu_set(cpu, cpu_online_map);
	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpu_set(cpu, per_cpu(cpu_sibling_map, base + i));
		cpu_set(base + i, per_cpu(cpu_sibling_map, cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpu_set(cpu, per_cpu(cpu_core_map, base + i));
		cpu_set(base + i, per_cpu(cpu_core_map, cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			cpu_set(i, per_cpu(cpu_core_map, cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();
	return 0;
}
/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 *
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	ipi_call_lock();
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_idle();
}
/*
 * Called by secondaries to update state and initialize CPU registers.
 */
static void __init smp_cpu_init(int cpunum)
{
	extern int init_per_cpu(int);  /* arch/parisc/kernel/processor.c */
	extern void init_IRQ(void);    /* arch/parisc/kernel/irq.c */
	extern void start_cpu_itimer(void); /* arch/parisc/kernel/time.c */

	/* Set modes and Enable floating point coprocessor */
	(void) init_per_cpu(cpunum);

	disable_sr_hashing();

	mb();

	/* Well, support 2.4 linux scheme as well. */
	if (cpu_online(cpunum)) {
		extern void machine_halt(void); /* arch/parisc.../process.c */

		printk(KERN_CRIT "CPU#%d already initialized!\n", cpunum);
		machine_halt();
	}

	notify_cpu_starting(cpunum);

	ipi_call_lock();
	set_cpu_online(cpunum, true);
	ipi_call_unlock();

	/* Initialise the idle task for this CPU */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);

	init_IRQ();   /* make sure no IRQs are enabled or pending */
	start_cpu_itimer();
}
/*
 * Bring a secondary processor online.
 */
void __cpuinit online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), 1);
	ipi_call_unlock();
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Set up tile-specific state for this cpu. */
	setup_cpu(0);

	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	preempt_enable();

	cpu_idle();
}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();
	struct device_node *l2_cache;
	int i, base;

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
	/* Clear any pending timer interrupts */
	mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);

	/* Enable decrementer interrupt */
	mtspr(SPRN_TCR, TCR_DIE);
#endif
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	if (smp_ops->setup_cpu)
		smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		snapshot_timebase();

	secondary_cpu_time_init();

	ipi_call_lock();
	notify_cpu_starting(cpu);
	set_cpu_online(cpu, true);
	/* Update sibling maps */
	base = cpu_first_thread_in_core(cpu);
	for (i = 0; i < threads_per_core; i++) {
		if (cpu_is_offline(base + i))
			continue;
		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));

		/* cpu_core_map should be a superset of
		 * cpu_sibling_map even if we don't have cache
		 * information, so update the former here, too.
		 */
		cpumask_set_cpu(cpu, cpu_core_mask(base + i));
		cpumask_set_cpu(base + i, cpu_core_mask(cpu));
	}
	l2_cache = cpu_to_l2cache(cpu);
	for_each_online_cpu(i) {
		struct device_node *np = cpu_to_l2cache(i);
		if (!np)
			continue;
		if (np == l2_cache) {
			cpumask_set_cpu(cpu, cpu_core_mask(i));
			cpumask_set_cpu(i, cpu_core_mask(cpu));
		}
		of_node_put(np);
	}
	of_node_put(l2_cache);
	ipi_call_unlock();

	local_irq_enable();

	cpu_idle();
	return 0;
}