static void pnv_smp_cpu_kill_self(void)
{
        unsigned int cpu;

        /* Standard hot unplug procedure */
        local_irq_disable();
        idle_task_exit();
        current->active_mm = NULL; /* for sanity */
        cpu = smp_processor_id();
        DBG("CPU%d offline\n", cpu);
        generic_set_cpu_dead(cpu);
        smp_wmb();

        /* We don't want to take decrementer interrupts while we are
         * offline, so clear LPCR:PECE1. We keep PECE2 enabled.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

        while (!generic_check_cpu_restart(cpu)) {
                power7_nap();
                if (!generic_check_cpu_restart(cpu)) {
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
                        /* We may be getting an IPI, so we re-enable
                         * interrupts to process it; it will be ignored
                         * since we aren't online (hopefully).
                         */
                        local_irq_enable();
                        local_irq_disable();
                }
        }
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
        DBG("CPU%d coming online...\n", cpu);
}

void play_dead(void)
{
        idle_task_exit();
        cpu_play_dead = 1;

        /*
         * Wakeup is on SW0 or SW1; disable everything else
         * Use BEV !IV (BRCM_WARM_RESTART_VEC) to avoid the regular Linux
         * IRQ handlers; this clears ST0_IE and returns immediately.
         */
        clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
        change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
                         IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
        irq_disable_hazard();

        /*
         * wait for SW interrupt from brcmstb_boot_secondary(), then jump
         * back to start_secondary()
         */
        do {
                __asm__ __volatile__(
                "       wait\n"
                "       nop\n"
                : : : "memory");
        } while (cpu_play_dead);

        __asm__ __volatile__(
        "       j       brcmstb_tp1_reentry\n"
        : : : "memory");
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        aee_rr_rec_hoplug(cpu, 51, 0);
        idle_task_exit();
        aee_rr_rec_hoplug(cpu, 52, 0);

        local_irq_disable();
        aee_rr_rec_hoplug(cpu, 53, 0);

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);
        aee_rr_rec_hoplug(cpu, 54, 0);

        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        cpu_ops[cpu]->cpu_die(cpu);
        aee_rr_rec_hoplug(cpu, 55, 0);

        BUG();
}

static void __cpuinit smp_85xx_mach_cpu_die(void)
{
        unsigned int cpu = smp_processor_id();
        u32 tmp;

        local_irq_disable();
        idle_task_exit();
        generic_set_cpu_dead(cpu);
        mb();

        mtspr(SPRN_TCR, 0);

        __flush_disable_L1();
        tmp = (mfspr(SPRN_HID0) & ~(HID0_DOZE|HID0_SLEEP)) | HID0_NAP;
        mtspr(SPRN_HID0, tmp);
        isync();

        /* Enter NAP mode. */
        tmp = mfmsr();
        tmp |= MSR_WE;
        mb();
        mtmsr(tmp);
        isync();

        while (1)
                ;
}

void cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

#ifdef CONFIG_HTC_DEBUG_FOOTPRINT
        set_cpu_footprint(cpu, 0x7);
#endif
        idle_task_exit();
#ifdef CONFIG_HTC_DEBUG_FOOTPRINT
        set_cpu_footprint(cpu, 0x8);
#endif
        local_irq_disable();
#ifdef CONFIG_HTC_DEBUG_FOOTPRINT
        set_cpu_footprint(cpu, 0x9);
#endif

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        complete(&cpu_died);
#ifdef CONFIG_HTC_DEBUG_FOOTPRINT
        set_cpu_footprint(cpu, 0xA);
#endif

        /* Actually shut the CPU down; this must never fail */
        cpu_ops[cpu]->cpu_die(cpu);
#ifdef CONFIG_HTC_DEBUG_FOOTPRINT
        set_cpu_footprint(cpu, 0xB);
#endif

        BUG();
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        idle_task_exit();
        local_irq_disable();
        __asm__ __volatile__(
                "       movi    a2, cpu_restart\n"
                "       jx      a2\n");
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        /* Tell the waiting CPU that this one is now dead */
        __this_cpu_write(cpu_state, CPU_DEAD);

        local_irq_disable();
}

void play_dead_common(void)
{
        idle_task_exit();
        irq_ctx_exit(raw_smp_processor_id());
        mb();

        __get_cpu_var(cpu_state) = CPU_DEAD;

        local_irq_disable();
}

static inline void play_dead(void)
{
        idle_task_exit();
        local_irq_disable();
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
}

static void pSeries_mach_cpu_die(void)
{
        local_irq_disable();
        idle_task_exit();
        xics_teardown_cpu();
        unregister_slb_shadow(hard_smp_processor_id(), __pa(get_slb_shadow()));
        rtas_stop_self();

        /* Should never get here... */
        BUG();
        for(;;);
}

static void pSeries_mach_cpu_die(void)
{
        local_irq_disable();
        idle_task_exit();

        /*
         * Some hardware requires clearing the CPPR, while other hardware
         * does not; it is safe either way.
         */
        pSeriesLP_cppr_info(0, 0);
        rtas_stop_self();

        /* Should never get here... */
        BUG();
        for(;;);
}

void play_dead(void)
{
        int cpu = cpu_number_map(cvmx_get_core_num());

        idle_task_exit();
        octeon_processor_boot = 0xff;
        per_cpu(cpu_state, cpu) = CPU_DEAD;

        mb();

        while (1)       /* core will be reset here */
                ;
}

void generic_mach_cpu_die(void)
{
        unsigned int cpu;

        local_irq_disable();
        idle_task_exit();
        cpu = smp_processor_id();
        printk(KERN_DEBUG "CPU%d offline\n", cpu);
        __get_cpu_var(cpu_state) = CPU_DEAD;
        smp_wmb();
        while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
                cpu_relax();
}

static inline void play_dead(void)
{
        extern void idle_task_exit(void); /* XXXAP find proper place */

        idle_task_exit();
        local_irq_disable();
        cpu_clear(smp_processor_id(), cpu_initialized);
        preempt_enable_no_resched();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);

        /* Same as arch/xen/kernel/smpboot.c:cpu_bringup(). */
        cpu_init();
        preempt_disable();
        local_irq_enable();
}

static void cpu_exit_clear(void)
{
        int cpu = raw_smp_processor_id();

        idle_task_exit();

        cpu_uninit();
        irq_ctx_exit(cpu);

        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);

        numa_remove_cpu(cpu);
}

/*
 * Called from the idle thread for the CPU which has been shutdown.
 *
 * Note that we disable IRQs here, but do not re-enable them
 * before returning to the caller. This is also the behaviour
 * of the other hotplug-cpu capable cores, so presumably coming
 * out of idle fixes this.
 */
void __ref cpu_die(void)
{
        unsigned int cpu = smp_processor_id();

        idle_task_exit();

        local_irq_disable();

        /* Tell __cpu_die() that this CPU is now safe to dispose of */
        (void)cpu_report_death();

        /*
         * Actually shutdown the CPU. This must never fail. The specific hotplug
         * mechanism must perform all required cache maintenance to ensure that
         * no dirty lines are lost in the process of shutting down the CPU.
         */
        if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
                cpu_ops[cpu]->cpu_die(cpu);

        BUG();
}

static void pnv_smp_cpu_kill_self(void)
{
        unsigned int cpu;

        /* Standard hot unplug procedure */
        local_irq_disable();
        idle_task_exit();
        current->active_mm = NULL; /* for sanity */
        cpu = smp_processor_id();
        DBG("CPU%d offline\n", cpu);
        generic_set_cpu_dead(cpu);
        smp_wmb();

        /* We don't want to take decrementer interrupts while we are
         * offline, so clear LPCR:PECE1. We keep PECE2 enabled.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

        while (!generic_check_cpu_restart(cpu)) {
                ppc64_runlatch_off();
                power7_nap(1);
                ppc64_runlatch_on();

                /* Reenable IRQs briefly to clear the IPI that woke us */
                local_irq_enable();
                local_irq_disable();
                mb();

                if (cpu_core_split_required())
                        continue;

                if (!generic_check_cpu_restart(cpu))
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
        }
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
        DBG("CPU%d coming online...\n", cpu);
}

static void pseries_mach_cpu_die(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned int hwcpu = hard_smp_processor_id();
        u8 cede_latency_hint = 0;

        local_irq_disable();
        idle_task_exit();
        if (xive_enabled())
                xive_teardown_cpu();
        else
                xics_teardown_cpu();

        if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
                set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
                if (ppc_md.suspend_disable_cpu)
                        ppc_md.suspend_disable_cpu();

                cede_latency_hint = 2;

                get_lppaca()->idle = 1;
                if (!lppaca_shared_proc(get_lppaca()))
                        get_lppaca()->donate_dedicated_cpu = 1;

                while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
                        while (!prep_irq_for_idle()) {
                                local_irq_enable();
                                local_irq_disable();
                        }

                        extended_cede_processor(cede_latency_hint);
                }

                local_irq_disable();

                if (!lppaca_shared_proc(get_lppaca()))
                        get_lppaca()->donate_dedicated_cpu = 0;
                get_lppaca()->idle = 0;

                if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
                        unregister_slb_shadow(hwcpu);

                        hard_irq_disable();
                        /*
                         * Call to start_secondary_resume() will not return.
                         * Kernel stack will be reset and start_secondary()
                         * will be called to continue the online operation.
                         */
                        start_secondary_resume();
                }
        }

        /* Requested state is CPU_STATE_OFFLINE at this point */
        WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

        set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
        unregister_slb_shadow(hwcpu);
        rtas_stop_self();

        /* Should never get here... */
        BUG();
        for(;;);
}