/*
 * Activate a secondary processor.
 *
 * Borrow the init_mm address space for this CPU's idle context, report
 * in to the boot CPU via smp_callin(), then drop into the idle loop.
 */
int __init start_secondary(void *unused)
{
	/* Pin init_mm so it cannot go away while this CPU idles on it. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	/* Synchronize with the boot CPU and finish per-CPU bring-up. */
	smp_callin();

	/* Enter the idle loop; this does not return in practice. */
	return cpu_idle(NULL);
}
/*
 * Entry point for a freshly started secondary CPU (called from the
 * low-level boot path): set up traps and interrupts, report in to the
 * boot CPU, then idle.  The call order is part of the boot protocol.
 */
asmlinkage void start_secondary(void)
{
	trap_init();
	init_IRQ();
	smp_callin();
	cpu_idle(NULL);
}
/*
 * Activate a secondary processor.
 */
static void __init start_secondary(void *unused)
{
	/*
	 * Don't put anything before smp_callin(), SMP
	 * booting is too fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* Spin until the boot CPU tells us to commence. */
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();

	setup_secondary_APIC_clock();

	/*
	 * For the I/O-APIC NMI watchdog, route NMIs through the local
	 * APIC LVT0 while IRQ0 on the legacy PIC is briefly masked.
	 */
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	/* Mark this CPU online before interrupts are enabled. */
	cpu_set(smp_processor_id(), cpu_online_map);

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();
	cpu_idle();
}
/*
 * Secondary-CPU entry point.  Identical to the plain variant except
 * that IRQ initialization is skipped when running on top of the
 * OSF Mach 3 microkernel, which owns the interrupt hardware.
 */
asmlinkage void start_secondary(void)
{
	trap_init();
#ifndef CONFIG_OSFMACH3
	init_IRQ();
#endif /* CONFIG_OSFMACH3 */
	smp_callin();
	cpu_idle(NULL);
}
/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is too
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	/* Callin done: the CPU0-hotplug bounce flag can be cleared. */
	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Enable the espfix hack for this CPU
	 */
#ifdef CONFIG_X86_ESPFIX64
	init_espfix_ap();
#endif

	/*
	 * We need to hold vector_lock so there the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);
}
/*
 * Activate a secondary processor.
 */
notrace static void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is too
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 *
	 * We need to hold vector_lock so there the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	ipi_call_lock();
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	ipi_call_unlock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_idle();
}
/*
 * Activate a secondary processor.
 *
 * Borrow init_mm as the idle address space, report in to the boot CPU,
 * then go idle.  The local 'cpu' is read here but otherwise unused in
 * this variant.
 */
int start_secondary(void *unused)
{
	int cpu;

	cpu = current->processor;

	/* Pin init_mm so it stays valid while this CPU idles on it. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_callin();

	/* Go into the idle loop. */
	return cpu_idle(NULL);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __devinit start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());

	/* Map PAL firmware before per-CPU init may need it. */
	efi_map_pal_code();
	cpu_init();
	smp_callin();

	cpu_idle();
	return 0;
}
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is too
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	/* Callin done: the CPU0-hotplug bounce flag can be cleared. */
	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Lock vector_lock and initialize the vectors on this cpu
	 * before setting the cpu online. We must set it online with
	 * vector_lock held to prevent a concurrent setup/teardown
	 * from seeing a half valid vector space.
	 */
	lock_vector_lock();
	setup_vector_irq(smp_processor_id());
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

#ifndef CONFIG_PRINTK_TIME
	/* Skipped with timestamped printk: clocks aren't set up yet here. */
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif

	/* Map PAL firmware before per-CPU init may need it. */
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
/*
 * Activate a secondary processor.  head.S calls this.
 *
 * After per-CPU init and callin, spin until the boot CPU raises
 * smp_commenced, then enter the idle loop.
 */
int __init start_secondary (void *unused)
{
	extern int cpu_idle (void);

	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());

	/* Map PAL firmware before per-CPU init may need it. */
	efi_map_pal_code();
	cpu_init();
	smp_callin();

	Dprintk("CPU %d is set to go.\n", smp_processor_id());

	/* Busy-wait for the boot CPU's commence signal. */
	while (!atomic_read(&smp_commenced))
		;

	Dprintk("CPU %d is starting idle.\n", smp_processor_id());
	return cpu_idle();
}
/*
 * start_secondary - activate a secondary processor (m32r).
 *
 * Performs per-CPU init and callin, waits for the commence signal from
 * the boot CPU, brings the CPU online, flushes stale low-memory TLB
 * entries, and enters the idle loop.
 *
 * @unused: currently unused.
 * Returns 0 (never reached in practice; cpu_idle() does not return).
 */
int __init start_secondary(void *unused)
{
	cpu_init();
	smp_callin();

	/* Wait until the boot CPU tells us to commence. */
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		cpu_relax();

	smp_online();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb_all();

	cpu_idle();
	return 0;
}
/*
 * Activate a secondary processor (ppc64).
 *
 * Borrow init_mm for the idle context, report in to the boot CPU,
 * clear the paca yield flag, and on shared-processor LPARs register
 * this CPU's Virtual Processor Area with the hypervisor before idling.
 */
int start_secondary(void *unused)
{
	int cpu;

	cpu = current->processor;

	/* Pin init_mm so it stays valid while this CPU idles on it. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	smp_callin();

	/* We have not yielded this virtual processor to the hypervisor. */
	get_paca()->yielded = 0;

	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
		vpa_init(cpu);
	}

	/* Go into the idle loop. */
	return cpu_idle(NULL);
}
/*
 * Activate a secondary processor.
 */
void __init start_secondary(void)
{
	/*
	 * Don't put anything before smp_callin(), SMP
	 * booting is too fragile that we want to limit the
	 * things done here to the most necessary things.
	 */
	cpu_init();
	smp_callin();

	/* otherwise gcc will move up the smp_processor_id before the cpu_init */
	barrier();

	Dprintk("cpu %d: waiting for commence\n", smp_processor_id());

	/* Spin until the boot CPU tells us to commence. */
	while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
		rep_nop();

	Dprintk("cpu %d: setting up apic clock\n", smp_processor_id());
	setup_secondary_APIC_clock();

	Dprintk("cpu %d: enabling apic timer\n", smp_processor_id());

	/*
	 * For the I/O-APIC NMI watchdog, route NMIs through the local
	 * APIC LVT0 while IRQ0 on the legacy PIC is briefly masked.
	 */
	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0(NULL);
		enable_8259A_irq(0);
	}

	enable_APIC_timer();

	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	/*
	 * Fixed the garbled "cpu %d eSetting ..." debug message to match
	 * the "cpu %d: ..." format of the other Dprintk calls above.
	 */
	Dprintk("cpu %d: setting cpu_online_map\n", smp_processor_id());
	cpu_set(smp_processor_id(), cpu_online_map);
	wmb();

	cpu_idle();
}
/*
 * Activate a secondary processor.  head.S calls this.
 *
 * Shared between native Linux and the Xen build: under XEN the EFI/PAL
 * mapping and Dprintk are skipped, VMX support is initialized when
 * enabled, and the Xen-specific idle loop is entered instead.
 */
int __devinit start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef XEN
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
#endif
	cpu_init();
	smp_callin();

#ifdef XEN
	if (vmx_enabled)
		vmx_init_env(0, 0);

	startup_cpu_idle_loop();
#else
	cpu_idle();
#endif
	return 0;
}
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* except direct CPU state initialization
	 * before cpu_init(), SMP booting is too fragile that we want to
	 * limit the things done here to the most necessary things.
	 */
	if (boot_cpu_has(X86_FEATURE_PCID))
		__write_cr4(__read_cr4() | X86_CR4_PCIDE);

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	/*
	 * Initialize the CR4 shadow before doing anything that could
	 * try to read it.
	 */
	cr4_init_shadow();
	__flush_tlb_all();
#endif
	load_current_idt();
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	/* Callin done: the CPU0-hotplug bounce flag can be cleared. */
	enable_start_cpu0 = 0;

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the boot CPU:
	 */
	check_tsc_sync_target();

	speculative_store_bypass_ht_init();

	/*
	 * Lock vector_lock, set CPU online and bring the vector
	 * allocator online. Online must be set with vector_lock held
	 * to prevent a concurrent irq setup/teardown from seeing a
	 * half valid vector space.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	lapic_online();
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}