static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	xen_kexec_image_t *image = arg;
	int ii;

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

	platform_kernel_launch_event();
	relocate_new_kernel(image->indirection_page, image->start_address,
			    __pa(ia64_boot_param), image->reboot_code_buffer);
	BUG();
}
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr =
		(unsigned long)page_address(image->control_code_page);
	unsigned long vector;
	int ii;
	u64 fp, gp;
	ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump;

	BUG_ON(!image);

	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;

		/* Register noop init handler */
		fp = ia64_tpa(init_handler->fp);
		gp = ia64_tpa(ia64_getreg(_IA64_REG_GP));
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0);
	} else {
		/* Unregister init handlers of current kernel */
		ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0);
	}

	/* Unregister mca handler - No more recovery on current kernel */
	ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0);

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
	       GRANULEROUNDDOWN((unsigned long)pal_addr));
	BUG();
}
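The rnk = (relocate_new_kernel_t)&code_addr cast above looks odd but is deliberate: on ia64 a C function pointer is the address of a function descriptor, not of code. A hedged sketch of the idea, mirroring the ia64_fptr_t type already used in this function (the gp caveat is an assumption about the relocator stub):

/* Sketch only: the shape of an ia64 function descriptor. */
typedef struct {
	u64 fp;	/* entry point -- the word that code_addr supplies */
	u64 gp;	/* global pointer -- assumed unused by the relocator */
} ia64_fptr_t;

/* An indirect call loads fp (and gp) from the pointed-to memory, so
 * the address of a local variable holding the relocator's address can
 * pose as a descriptor; whatever follows it on the stack is read as a
 * don't-care gp. */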
void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

			if (__ratelimit(&ratelimit)) {
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			scheduler_ipi();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * This function emulates interrupt processing when a CPU is about to
 * be brought down.
 */
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	/*
	 * Perform normal interrupt style processing
	 */
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			/*
			 * Call the handler just as ia64_handle_irq would
			 * have from a real interrupt, passing NULL for
			 * pt_regs since none exists here.  This could
			 * probably share code with ia64_handle_irq.
			 */
			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			/*
			 * Disable interrupts and send EOI
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
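A hedged sketch of the intended call site (the caller below is assumed, not shown in these snippets): during CPU offline, IRQs are first re-targeted to surviving CPUs, and anything still latched locally is then drained by this emulated delivery.

/* Hypothetical caller, for illustration only. */
static void flush_local_irqs_before_offline(void)
{
	local_irq_disable();
	/* Deliver whatever is already pending in cr.irr as if the
	 * interrupts had fired normally on this CPU. */
	ia64_process_pending_intr();
}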
void __init
check_sal_cache_flush (void)
{
	unsigned long flags;
	int cpu;
	u64 vector, cache_type = 3;
	struct ia64_sal_retval isrv;

	if (sal_cache_flush_drops_interrupts)
		return;

	cpu = get_cpu();
	local_irq_save(flags);

	/*
	 * Send ourselves a timer interrupt, wait until it's reported, and see
	 * if SAL_CACHE_FLUSH drops it.
	 */
	platform_send_ipi(cpu, IA64_TIMER_VECTOR, IA64_IPI_DM_INT, 0);

	while (!ia64_get_irr(IA64_TIMER_VECTOR))
		cpu_relax();

	SAL_CALL(isrv, SAL_CACHE_FLUSH, cache_type, 0, 0, 0, 0, 0, 0);

	if (isrv.status)
		printk(KERN_ERR "SAL_CACHE_FLUSH failed with %ld\n",
		       isrv.status);

	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
		vector = ia64_get_ivr();
		ia64_eoi();
		WARN_ON(vector != IA64_TIMER_VECTOR);
	} else {
		sal_cache_flush_drops_interrupts = 1;
		printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
		       "PAL_CACHE_FLUSH will be used instead\n");
		ia64_eoi();
	}

	local_irq_restore(flags);
	put_cpu();
}
/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
{
	struct kimage *image = arg;
	relocate_new_kernel_t rnk;
	void *pal_addr = efi_get_pal_addr();
	unsigned long code_addr =
		(unsigned long)page_address(image->control_code_page);
	int ii;

	BUG_ON(!image);

	if (image->type == KEXEC_TYPE_CRASH) {
		crash_save_this_cpu();
		current->thread.ksp = (__u64)info->sw - 16;
	}

	/* Interrupts aren't acceptable while we reboot */
	local_irq_disable();

	/* Mask CMC and Performance Monitor interrupts */
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* Mask ITV and Local Redirect Registers */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);

	/* terminate possible nested in-service interrupts */
	for (ii = 0; ii < 16; ii++)
		ia64_eoi();

	/* unmask TPR and clear any pending interrupts */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
	ia64_srlz_d();
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();

	platform_kernel_launch_event();
	rnk = (relocate_new_kernel_t)&code_addr;
	(*rnk)(image->head, image->start, ia64_boot_param,
	       GRANULEROUNDDOWN((unsigned long)pal_addr));
	BUG();
}
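The mask-then-drain sequence above recurs in all three kexec variants; factored out, the drain step is just the following (a minimal sketch, and the helper name is hypothetical):

/* Hypothetical helper: acknowledge and EOI every pending external
 * interrupt until cr.ivr reports the spurious vector. */
static inline void ia64_drain_pending_interrupts(void)
{
	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
		ia64_eoi();
}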
static inline void sn_get_all_ivr(void)
{
	int vector;

	vector = ia64_get_ivr();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		__set_bit(vector, (volatile void *)pda.sn_soft_irr);
		ia64_eoi();
		if (vector > IA64_LAST_DEVICE_VECTOR)
			return;
		vector = ia64_get_ivr();
	}
}
static void __init
check_sal_cache_flush (void)
{
	unsigned long flags;
	int cpu;
	u64 vector;

	cpu = get_cpu();
	local_irq_save(flags);

	/*
	 * Schedule a timer interrupt, wait until it's reported, and see if
	 * SAL_CACHE_FLUSH drops it.
	 */
	ia64_set_itv(IA64_TIMER_VECTOR);
	ia64_set_itm(ia64_get_itc() + 1000);

	while (!ia64_get_irr(IA64_TIMER_VECTOR))
		cpu_relax();

	ia64_sal_cache_flush(3);

	if (ia64_get_irr(IA64_TIMER_VECTOR)) {
		vector = ia64_get_ivr();
		ia64_eoi();
		WARN_ON(vector != IA64_TIMER_VECTOR);
	} else {
		sal_cache_flush_drops_interrupts = 1;
		printk(KERN_ERR "SAL: SAL_CACHE_FLUSH drops interrupts; "
		       "PAL_CACHE_FLUSH will be used instead\n");
		ia64_eoi();
	}

	local_irq_restore(flags);
	put_cpu();
}
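Both probe variants set sal_cache_flush_drops_interrupts, which later flushes consult; a hedged sketch of that dispatch follows (do_pal_cache_flush() is a hypothetical stand-in for the PAL path the printk mentions; only the SAL side appears in these snippets):

/* Hypothetical consumer of the probe result. */
extern int sal_cache_flush_drops_interrupts;
void do_pal_cache_flush(u64 cache_type);	/* assumed PAL wrapper */

void flush_caches(u64 cache_type)
{
	if (sal_cache_flush_drops_interrupts)
		do_pal_cache_flush(cache_type);	/* keeps interrupts intact */
	else
		ia64_sal_cache_flush(cache_type);
}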
void ia64_process_pending_intr(void)
{
	ia64_vector vector;
	unsigned long saved_tpr;
	extern unsigned int vectors_in_migration[NR_IRQS];

	vector = ia64_get_ivr();

	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();

	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d not being mapped "
				       "to any IRQ!!\n", __func__, vector,
				       smp_processor_id());
			} else {
				vectors_in_migration[irq] = 0;
				generic_handle_irq(irq);
			}
			set_irq_regs(old_regs);

			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	irq_exit();
}
/*
 * This is where the IVT branches when we get an external interrupt.
 * It dispatches to the correct hardware IRQ handler via a function
 * pointer.
 */
void ia64_handle_irq (ia64_vector vector, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long saved_tpr;

#if IRQ_DEBUG
	{
		unsigned long bsp, sp;

		/*
		 * Note: if the interrupt happened while executing in
		 * the context switch routine (ia64_switch_to), we may
		 * get a spurious stack overflow here.  This is
		 * because the register and the memory stack are not
		 * switched atomically.
		 */
		bsp = ia64_getreg(_IA64_REG_AR_BSP);
		sp = ia64_getreg(_IA64_REG_SP);

		if ((sp - bsp) < 1024) {
			static unsigned char count;
			static long last_time;

			if (time_after(jiffies, last_time + 5 * HZ))
				count = 0;
			if (++count < 5) {
				last_time = jiffies;
				printk("ia64_handle_irq: DANGER: less than "
				       "1KB of free stack space!!\n"
				       "(bsp=0x%lx, sp=%lx)\n", bsp, sp);
			}
		}
	}
#endif /* IRQ_DEBUG */

	/*
	 * Always set TPR to limit maximum interrupt nesting depth to
	 * 16 (without this, it would be ~240, which could easily lead
	 * to kernel stack overflows).
	 */
	irq_enter();
	saved_tpr = ia64_getreg(_IA64_REG_CR_TPR);
	ia64_srlz_d();
	while (vector != IA64_SPURIOUS_INT_VECTOR) {
		int irq = local_vector_to_irq(vector);
		struct irq_desc *desc = irq_to_desc(irq);

		if (unlikely(IS_LOCAL_TLB_FLUSH(vector))) {
			smp_local_flush_tlb();
			kstat_incr_irqs_this_cpu(irq, desc);
		} else if (unlikely(IS_RESCHEDULE(vector))) {
			kstat_incr_irqs_this_cpu(irq, desc);
		} else {
			ia64_setreg(_IA64_REG_CR_TPR, vector);
			ia64_srlz_d();

			if (unlikely(irq < 0)) {
				printk(KERN_ERR "%s: Unexpected interrupt "
				       "vector %d on CPU %d is not mapped "
				       "to any IRQ!\n", __func__, vector,
				       smp_processor_id());
			} else
				generic_handle_irq(irq);

			/*
			 * Disable interrupts and send EOI:
			 */
			local_irq_disable();
			ia64_setreg(_IA64_REG_CR_TPR, saved_tpr);
		}
		ia64_eoi();
		vector = ia64_get_ivr();
	}
	/*
	 * This must be done *after* the ia64_eoi().  For example, the
	 * keyboard softirq handler needs to be able to wait for further
	 * keyboard interrupts, which can't come through until ia64_eoi()
	 * has been done.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
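The nesting bound in the comment above falls out of the cr.tpr layout; a hedged sketch of the fields involved (bit positions per the Itanium architecture, stated here as an assumption rather than taken from these snippets):

/* Sketch of the cr.tpr fields this code relies on (assumed layout):
 * writing the in-service vector number sets tpr.mic = vector/16,
 * masking every priority class at or below it, so 256 vectors at 16
 * per class gives at most 16 nesting levels. */
#define IA64_TPR_MIC_SHIFT	4		/* tpr.mic: bits 7..4 */
#define IA64_TPR_MMI		(1UL << 16)	/* tpr.mmi: mask all */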
static void ia64_ack_msi_irq(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ia64_eoi();
}
static void sn_ack_msi_irq(struct irq_data *data)
{
	irq_move_irq(data);
	ia64_eoi();
}
static void ia64_ack_msi_irq(unsigned int irq)
{
	move_native_irq(irq);
	ia64_eoi();
}
static void ia64_ack_msi_irq(struct irq_data *data)
{
	irq_complete_move(data->irq);
	irq_move_irq(data);
	ia64_eoi();
}
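These four ack variants track the kernel's irq_chip API as it moved from unsigned int irq arguments to struct irq_data. A hedged sketch of how the newest variant would be wired up (the chip name and the omission of other callbacks are assumptions, not taken from these snippets):

/* Hypothetical registration, shown for context only. */
static struct irq_chip ia64_msi_chip = {
	.name		= "PCI-MSI",
	.irq_ack	= ia64_ack_msi_irq,	/* struct irq_data variant */
};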