/*
 * Final landing point for a CPU being taken offline: lower interrupt
 * priority, disable local interrupts, and halt.  Never returns.
 */
void cpu_die(void)
{
	max_xtp();		/* raise XTP so no new interrupts are routed here */
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);		/* belt-and-braces: spin forever if cpu_halt() returns */
}
/*
 * Final landing point for a CPU being taken offline: lower interrupt
 * priority, disable local interrupts, and halt.  Never returns.
 */
void cpu_die(void)
{
	max_xtp();		/* raise XTP so no new interrupts are routed here */
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);		/* belt-and-braces: spin forever if cpu_halt() returns */
}
/*
 * Take the calling CPU out of service: mark it offline, stop accepting
 * interrupts, and halt.  Called on each CPU (via IPI) during machine
 * shutdown.  Does not return.
 */
static void stop_this_cpu(void)
{
	/*
	 * Remove this CPU from the online mask:
	 */
	set_cpu_online(smp_processor_id(), false);
	max_xtp();		/* raise XTP so no new interrupts are routed here */
	local_irq_disable();
	cpu_halt();
}
/*
 * Take the calling CPU out of service: mark it offline, stop accepting
 * interrupts, and halt.  Called on each CPU (via IPI) during machine
 * shutdown.  Does not return.
 */
static void stop_this_cpu(void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();		/* raise XTP so no new interrupts are routed here */
	local_irq_disable();
	cpu_halt();
}
/*
 * Migrate all interrupt activity away from the calling CPU in preparation
 * for taking it offline.  Runs with the CPU still online; by the time it
 * returns, the CPU has local interrupts disabled and its XTP raised so no
 * further device interrupts are routed to it.
 *
 * NOTE(review): time_keeper_id is a volatile int updated here without an
 * explicit lock — presumably serialized by the hotplug path; confirm
 * against callers.
 */
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in local APIC.
	 * This is to account for cases that device interrupted during the time the
	 * rte was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			/* No hardware frame for this synthetic dispatch, so
			 * hand the handler a NULL regs pointer. */
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let processor die. We do irq disable and max_xtp() to
	 * ensure there is no more interrupts routed to this processor.
	 * But the local timer interrupt can have 1 pending which we
	 * take care in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
/*
 * Take the calling CPU out of service: mark it offline, stop accepting
 * interrupts, and halt.  Oldest variant of this routine: clears the
 * online bit directly and uses the legacy __cli() to disable interrupts.
 * Does not return.
 */
static void stop_this_cpu (void)
{
	extern void cpu_halt (void);
	/*
	 * Remove this CPU:
	 */
	clear_bit(smp_processor_id(), &cpu_online_map);
	max_xtp();		/* raise XTP so no new interrupts are routed here */
	__cli();		/* legacy local-interrupt disable */
	cpu_halt();
}