/*
 * By the time APs call ap_init() caching has been setup, and microcode has
 * been loaded
 */
static void ap_init(unsigned int cpu_index)
{
	struct udevice *cpu_dev;
	int id;

	/* Make sure this AP's local APIC is up before anything else */
	enable_lapic();

	id = lapicid();
	if (find_cpu_by_apid_id(id, &cpu_dev)) {
		/* No device matches this APIC ID; just park the AP */
		debug("Unknown CPU apic_id %x\n", id);
	} else {
		debug("AP: slot %d apic_id %x, dev %s\n", cpu_index, id,
		      cpu_dev ? cpu_dev->name : "(apic_id not found)");

		/* Walk the flight plan */
		ap_do_flight_plan(cpu_dev);

		/* Park the AP */
		debug("parking\n");
	}

	stop_this_cpu();
}
/*
 * Reboot IPI handler: acknowledge the interrupt, leave VMX operation
 * (cpu_emergency_vmxoff) so the CPU can be reset cleanly, then halt here.
 */
asmlinkage __visible void smp_reboot_interrupt(void)
{
	/* Enter IRQ context and ack the IPI in one step */
	ipi_entering_ack_irq();
	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);
	/* NOTE(review): stop_this_cpu() is expected not to return here */
	irq_exit();
}
/*
 * Reboot IPI handler: ack the APIC interrupt, enter IRQ context and
 * halt this CPU.
 */
asmlinkage void smp_reboot_interrupt(void)
{
	ack_APIC_irq();
	irq_enter();
	stop_this_cpu(NULL);
	/* NOTE(review): stop_this_cpu() is expected not to return here */
	irq_exit();
}
/* C entry point of secondary cpus */
asmlinkage void secondary_cpu_init(unsigned int index)
{
	atomic_inc(&active_cpus);

	/* Serialize AP bring-up unless parallel init is enabled */
	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		spin_lock(&start_cpu_lock);

#ifdef __SSE3__
	/*
	 * Seems that CR4 was cleared when AP start via lapic_start_cpu()
	 * Turn on CR4.OSFXSR and CR4.OSXMMEXCPT when SSE options enabled
	 */
	u32 cr4 = read_cr4() | (CR4_OSFXSR | CR4_OSXMMEXCPT);

	write_cr4(cr4);
#endif
	cpu_initialize(index);

	if (!IS_ENABLED(CONFIG_PARALLEL_CPU_INIT))
		spin_unlock(&start_cpu_lock);

	atomic_dec(&active_cpus);

	/* This AP is done; park it */
	stop_this_cpu();
}
void handle_IPI (int irq, void *dev_id, struct pt_regs *regs) { int this_cpu = smp_processor_id(); unsigned long *pending_ipis = &local_cpu_data->ipi.operation; unsigned long ops; /* Count this now; we may make a call that never returns. */ local_cpu_data->ipi_count++; mb(); /* Order interrupt and bit testing. */ while ((ops = xchg(pending_ipis, 0)) != 0) { mb(); /* Order bit clearing and data access. */ do { unsigned long which; which = ffz(~ops); ops &= ~(1 << which); switch (which) { case IPI_CALL_FUNC: { struct call_data_struct *data; void (*func)(void *info); void *info; int wait; /* release the 'pointer lock' */ data = (struct call_data_struct *) call_data; func = data->func; info = data->info; wait = data->wait; mb(); atomic_inc(&data->started); /* At this point the structure may be gone unless wait is true. */ (*func)(info); /* Notify the sending CPU that the task is done. */ mb(); if (wait) atomic_inc(&data->finished); } break; case IPI_CPU_STOP: stop_this_cpu(); break; default: printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); break; } /* Switch */ } while (ops); mb(); /* Order data access and bit testing. */ } }
/* Halt the machine: take down the other CPUs, then halt this one. */
static void native_machine_halt(void)
{
	/* stop other cpus */
	machine_shutdown();
	/* stop this cpu */
	stop_this_cpu(NULL);
}
/*
 * Halt the machine: shut down the other CPUs and APICs, notify tboot
 * of the halt, then stop the current CPU.
 */
static void native_machine_halt(void)
{
	/* Stop other cpus and apics */
	machine_shutdown();

	tboot_shutdown(TB_SHUTDOWN_HALT);

	stop_this_cpu(NULL);
}
/*
 * NMI callback used to stop remote CPUs. The handler is registered on
 * the initiating CPU as well, so ignore the NMI there instead of
 * stopping ourselves.
 */
static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() != atomic_read(&stopping_cpu))
		stop_this_cpu(NULL);

	return NMI_HANDLED;
}
irqreturn_t handle_IPI (int irq, void *dev_id) { int this_cpu = get_cpu(); unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation); unsigned long ops; mb(); /* */ while ((ops = xchg(pending_ipis, 0)) != 0) { mb(); /* */ do { unsigned long which; which = ffz(~ops); ops &= ~(1 << which); switch (which) { case IPI_CPU_STOP: stop_this_cpu(); break; case IPI_CALL_FUNC: generic_smp_call_function_interrupt(); break; case IPI_CALL_FUNC_SINGLE: generic_smp_call_function_single_interrupt(); break; #ifdef CONFIG_KEXEC case IPI_KDUMP_CPU_STOP: unw_init_running(kdump_cpu_freeze, NULL); break; #endif default: printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which); break; } } while (ops); mb(); /* */ } put_cpu(); return IRQ_HANDLED; }