/*
 * Replay every interrupt registration cached in saved_percpu_irqs[]
 * on the given CPU, without re-saving them (save flag = 0).
 * Compiled out entirely on UP kernels.
 */
void
xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
	unsigned int idx;

	for (idx = 0; idx < saved_irq_cnt; idx++)
		__xen_register_percpu_irq(cpu,
					  saved_percpu_irqs[idx].irq,
					  saved_percpu_irqs[idx].action,
					  0);
#endif
}
/*
 * Mark the slab allocator as usable for irq registration, then replay
 * the cached per-CPU irq registrations on the current CPU (save flag
 * = 0, so they are not cached again).
 *
 * No locking is taken while walking the cached array: per the original
 * author, only the BSP runs this step, so there is no race.
 *
 * NOTE(review): the loop bound is late_irq_cnt but the array indexed
 * is saved_percpu_irqs[] — confirm both refer to the same set of
 * cached entries (declarations are outside this chunk).
 */
static void
xen_bind_early_percpu_irq(void)
{
	int idx;

	xen_slab_ready = 1;

	for (idx = 0; idx < late_irq_cnt; idx++)
		__xen_register_percpu_irq(smp_processor_id(),
					  saved_percpu_irqs[idx].irq,
					  saved_percpu_irqs[idx].action,
					  0);
}
/*
 * Removed: this was a verbatim duplicate definition of
 * xen_bind_early_percpu_irq(), already defined above.  Two static
 * definitions of the same symbol in one translation unit are a
 * redefinition error and cannot compile.
 */
/*
 * Register a per-CPU interrupt vector on the current CPU.
 *
 * Passes save flag = 1 (the replay paths in this file pass 0) —
 * presumably this caches the registration in saved_percpu_irqs[] for
 * later replay on other CPUs; confirm in __xen_register_percpu_irq().
 */
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
	unsigned int cpu = smp_processor_id();

	__xen_register_percpu_irq(cpu, vec, action, 1);
}