int ath_be_handler(struct pt_regs *regs, int is_fixup)
{
#ifdef CONFIG_MACH_AR934x
	printk("ath data bus error: cause 0x%x epc 0x%x\nrebooting...",
	       read_c0_cause(), read_c0_epc());
	ath_restart(NULL);
#else
	printk("ath data bus error: cause %#x\n", read_c0_cause());
#endif
	return (is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL);
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending;

	pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7)
		do_IRQ(AR71XX_CPU_IRQ_TIMER);
	else if (pending & STATUSF_IP2)
		ip2_handler();
	else if (pending & STATUSF_IP4)
		do_IRQ(AR71XX_CPU_IRQ_GE0);
	else if (pending & STATUSF_IP5)
		do_IRQ(AR71XX_CPU_IRQ_GE1);
	else if (pending & STATUSF_IP3)
		ip3_handler();
	else if (pending & STATUSF_IP6)
		ar71xx_misc_irq_dispatch();
	else
		spurious_interrupt();
}
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	if (handle_perf_irq(r2))
		goto out;

	if (!r2 || (read_c0_cause() & (1 << 30))) {
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}
/*! @brief The interrupt exception service function.
 * @details The \c plat_irq_dispatch() function is the interrupt exception
 * service function, called when an exception is raised and the exception
 * code indicates an interrupt (0x00).
 */
asmlinkage void plat_irq_dispatch(
	struct pt_regs *regs	/*!< registers of the interrupted task */
	)
{
	/* Get the current pending interrupt status */
	unsigned long pending = read_c0_cause() & read_c0_status();

	/*! @note Clear all pending interrupts before dispatching.  The
	 *  interrupt information structure will call the "end" function from
	 *  the do_IRQ function, which should re-enable the specific interrupt
	 *  when the interrupt handling completes (see the irq_chip structure).
	 */
	/* Disable all MIPS active pending interrupts */
	clear_c0_status(pending);

	/* These are the interrupts that are to be dispatched */
	if (pending & (STATUSF_IP7 | STATUSF_IP2)) {
		/* Dispatch timer interrupt (HW INT#5/IP7) */
		if (pending & STATUSF_IP7)
			brcm_mips_int7_dispatch(regs);

		/* Dispatch shared interrupt (HW INT#0/IP2) */
		if (pending & STATUSF_IP2)
			brcm_mips_int2_dispatch(regs);

		/* Return following the successful interrupt exception handling */
		return;
	}

	/* Other interrupts are unhandled and treated as spurious interrupts */
	spurious_interrupt(regs);
}
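/*
 * A minimal sketch, not taken from the dispatcher above: the @note there
 * relies on the per-interrupt irq_chip re-enabling the CP0 IM bit that the
 * dispatcher masked.  The chip and callback names below are hypothetical,
 * and the irq_chip callback layout differs between kernel versions; this
 * only illustrates the mask-in-dispatch / unmask-on-completion pairing
 * using clear_c0_status()/set_c0_status().
 */
static void example_cp0_irq_mask(unsigned int irq)
{
	/* irq is assumed here to be the CP0 IP bit number (0..7) */
	clear_c0_status(STATUSF_IP0 << irq);
}

static void example_cp0_irq_unmask(unsigned int irq)
{
	set_c0_status(STATUSF_IP0 << irq);
}

static struct irq_chip example_cp0_irq_chip = {
	.name	= "CP0",
	.mask	= example_cp0_irq_mask,
	.unmask	= example_cp0_irq_unmask,
};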
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		goto out;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & (1 << 30))) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);
	}

out:
	return IRQ_HANDLED;
}
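/*
 * A minimal sketch, not part of the handler above: on MIPS32/64 R2 and later
 * the pending-timer test written as (read_c0_cause() & (1 << 30)) checks the
 * Cause.TI bit, which <asm/mipsregs.h> also exposes by name (CAUSEF_TI) on
 * kernels that define it.
 */
static inline int example_c0_timer_pending(void)
{
	return (read_c0_cause() & CAUSEF_TI) != 0;
}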
void brcm_irq_dispatch(struct pt_regs *regs)
{
	u32 cause;

	cause = read_c0_cause() & read_c0_status() & CAUSEF_IP;

#ifdef CONFIG_KERNPROF
	change_c0_status(cause | 1, 1);
#else
	clear_c0_status(cause);
#endif

	if (cause & CAUSEF_IP7)
		do_IRQ(7, regs);
	if (cause & CAUSEF_IP2)
		do_IRQ(2, regs);
	if (cause & CAUSEF_IP3)
		do_IRQ(3, regs);
	if (cause & CAUSEF_IP4)
		do_IRQ(4, regs);
	if (cause & CAUSEF_IP5)
		do_IRQ(5, regs);
	if (cause & CAUSEF_IP6)
		do_IRQ(6, regs);
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause();

	if (pending & CAUSEF_IP7) {
		do_IRQ(M36_IRQ_TIMER);
	} else if ((pending & CAUSEF_IP3) || (pending & CAUSEF_IP2)) {
		sys_irqdispatch();
	} else {
#ifdef CONFIG_ENABLE_RPC
		{
			unsigned long rpc_status;

			rpc_status = *(volatile unsigned char *)(sys_rpc_addr);
			rpc_status &= sys_rpc_mask;

			if (rpc_status & sys_rpc_irq1_mask)
				do_IRQ(71);
			if (rpc_status & sys_rpc_irq2_mask)
				do_IRQ(70);
		}
#endif
		spurious_interrupt();
	}
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending;

	pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7)
		do_IRQ(ATH79_CPU_IRQ(7));
	else if (pending & STATUSF_IP2)
		ath79_ip2_handler();
	else if (pending & STATUSF_IP4)
		do_IRQ(ATH79_CPU_IRQ(4));
	else if (pending & STATUSF_IP5)
		do_IRQ(ATH79_CPU_IRQ(5));
	else if (pending & STATUSF_IP3)
		ath79_ip3_handler();
	else if (pending & STATUSF_IP6)
		do_IRQ(ATH79_CPU_IRQ(6));
	else
		spurious_interrupt();
}
irqreturn_t smtc_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	int vpflags;

	if (read_c0_cause() & (1 << 30)) {
		/* If timer interrupt, make it de-assert */
		write_c0_compare(read_c0_count() - 1);
		vpflags = dvpe();
		clear_c0_cause(0x100 << 7);
		evpe(vpflags);

		/*
		 * There are things we only want to do once per tick
		 * in an "MP" system.  One TC of each VPE will take
		 * the actual timer interrupt.  The others will get
		 * timer broadcast IPIs.  We use whoever it is that takes
		 * the tick on VPE 0 to run the full timer_interrupt().
		 */
		if (cpu_data[cpu].vpe_id == 0) {
			timer_interrupt(irq, NULL, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		} else {
			write_c0_compare(read_c0_count() +
					 (mips_hpt_frequency / HZ));
			local_timer_interrupt(irq, dev_id, regs);
			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
		}
	}

	return IRQ_HANDLED;
}
static int tx4939_proc_show_cp0(char *sysbuf, char **start, off_t off,
				int count, int *eof, void *data)
{
	int len = 0;

	len += sprintf(sysbuf + len, "INDEX :0x%08x\n", read_c0_index());
	len += sprintf(sysbuf + len, "ENTRYLO0:0x%08lx\n", read_c0_entrylo0());
	len += sprintf(sysbuf + len, "ENTRYLO1:0x%08lx\n", read_c0_entrylo1());
	len += sprintf(sysbuf + len, "CONTEXT :0x%08lx\n", read_c0_context());
	len += sprintf(sysbuf + len, "PAGEMASK:0x%08x\n", read_c0_pagemask());
	len += sprintf(sysbuf + len, "WIRED :0x%08x\n", read_c0_wired());
	len += sprintf(sysbuf + len, "COUNT :0x%08x\n", read_c0_count());
	len += sprintf(sysbuf + len, "ENTRYHI :0x%08lx\n", read_c0_entryhi());
	len += sprintf(sysbuf + len, "COMPARE :0x%08x\n", read_c0_compare());
	len += sprintf(sysbuf + len, "STATUS :0x%08x\n", read_c0_status());
	len += sprintf(sysbuf + len, "CAUSE :0x%08x\n", read_c0_cause());
	len += sprintf(sysbuf + len, "PRId :0x%08x\n", read_c0_prid());
	len += sprintf(sysbuf + len, "CONFIG :0x%08x\n", read_c0_config());
	len += sprintf(sysbuf + len, "XCONTEXT:0x%08lx\n", read_c0_xcontext());
	len += sprintf(sysbuf + len, "TagLo :0x%08x\n", read_c0_taglo());
	len += sprintf(sysbuf + len, "TagHi :0x%08x\n", read_c0_taghi());
	len += sprintf(sysbuf + len, "ErrorEPC:0x%08lx\n", read_c0_errorepc());

	*eof = 1;
	return len;
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

	/*
	 * What a pain. We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never
	 * saves the full 64 bits of a register across a function call.  Being
	 * the interrupt handler, we're guaranteed that interrupts are disabled
	 * during this code so we don't have to worry about random interrupts
	 * blasting the high 32 bits.
	 */
	pending = read_c0_cause() & read_c0_status() & ST0_IM;

	if (pending & CAUSEF_IP7)	/* CPU performance counter interrupt */
		do_IRQ(MIPS_CPU_IRQ_BASE + 7);
	else if (pending & CAUSEF_IP4)
		do_IRQ(K_INT_TIMER_0 + cpu);	/* sb1250_timer_interrupt() */
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif
	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
	else
		spurious_interrupt();
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending;

	pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7)
		do_IRQ(AR71XX_CPU_IRQ_TIMER);
#ifdef CONFIG_PCI
	else if (pending & STATUSF_IP2)
		ar71xx_pci_irq_dispatch();
#endif
	else if (pending & STATUSF_IP4)
		do_IRQ(AR71XX_CPU_IRQ_GE0);
	else if (pending & STATUSF_IP5)
		do_IRQ(AR71XX_CPU_IRQ_GE1);
	else if (pending & STATUSF_IP3)
		do_IRQ(AR71XX_CPU_IRQ_USB);
	else if (pending & STATUSF_IP6)
		ar71xx_misc_irq_dispatch();
	else
		spurious_interrupt();
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cause = read_c0_cause();
	unsigned int status = read_c0_status();
	unsigned int pending = cause & status;

	if (pending & STATUSF_IP7) {
		do_IRQ(7);
	} else if (pending & STATUSF_IP2) {
#ifdef CONFIG_HYPERTRANSPORT
		ll_ht_smp_irq_handler(2);
#else
		do_IRQ(2);
#endif
	} else if (pending & STATUSF_IP3) {
		do_IRQ(3);
	} else if (pending & STATUSF_IP4) {
		do_IRQ(4);
	} else if (pending & STATUSF_IP5) {
#ifdef CONFIG_SMP
		titan_mailbox_irq();
#else
		do_IRQ(5);
#endif
	} else if (pending & STATUSF_IP6) {
		do_IRQ(4);
	}
}
/*
 * Dispatch interrupts.
 * XXX: This currently does not prioritize except in calling order.  Eventually
 * there should perhaps be a static map which defines the IPs to be masked for
 * a given IP.
 */
void ar7240_irq_dispatch(struct pt_regs *regs)
{
	int pending = read_c0_status() & read_c0_cause();

	if (pending & CAUSEF_IP7)
		do_IRQ(AR7240_CPU_IRQ_TIMER, regs);
	else if (pending & CAUSEF_IP2)
		ar7240_dispatch_pci_intr(regs);
	else if (pending & CAUSEF_IP4)
		do_IRQ(AR7240_CPU_IRQ_GE0, regs);
	else if (pending & CAUSEF_IP5)
		do_IRQ(AR7240_CPU_IRQ_GE1, regs);
	else if (pending & CAUSEF_IP3)
		do_IRQ(AR7240_CPU_IRQ_USB, regs);
	else if (pending & CAUSEF_IP6)
		ar7240_dispatch_misc_intr(regs);

	/*
	 * Some PCI devices are write-to-clear.  These writes are posted and
	 * might require a flush (r8169.c e.g.).  It's unclear what will have
	 * more performance impact - a flush after every interrupt or taking a
	 * few "spurious" interrupts.  For now, it's the latter.
	 */
	/* else printk("spurious IRQ pending: 0x%x\n", pending); */
}
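/*
 * A minimal sketch related to the posted-write note above, not from the
 * original source: one common way to force posted PCI writes out is to read
 * any register back from the same device.  The device handle and register
 * offset below are hypothetical.
 */
#define EXAMPLE_DEV_STATUS_REG	0x10	/* hypothetical register offset */

static inline void example_flush_posted_writes(void __iomem *dev_regs)
{
	/* The value is discarded; the read only drains the posted writes. */
	(void) __raw_readl(dev_regs + EXAMPLE_DEV_STATUS_REG);
}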
static void tx4927_irq_cp0_modify(unsigned cp0_reg, unsigned clr_bits,
				  unsigned set_bits)
{
	unsigned long val = 0;

	switch (cp0_reg) {
	case CCP0_STATUS:
		val = read_c0_status();
		break;
	case CCP0_CAUSE:
		val = read_c0_cause();
		break;
	}

	val &= ~clr_bits;
	val |= set_bits;

	switch (cp0_reg) {
	case CCP0_STATUS:
		write_c0_status(val);
		break;
	case CCP0_CAUSE:
		write_c0_cause(val);
		break;
	}
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cause = read_c0_cause();
	unsigned int pending;
	int cpuid = smp_processor_id();
	unsigned long flags;

	pending = cause & read_c0_status() & ST0_IM;

#ifdef CONFIG_SMP
	if (pending & CAUSEF_IP3) {
		ipr_spinlock(flags);
		//irq_intc_ctrlmask_affinity(cpuid, 1);
		response_cpu_busy |= 1 << cpuid;
		ipr_spinunlock(flags);

		jzsoc_mbox_interrupt(cpuid);

		ipr_spinlock(flags);
		response_cpu_busy &= ~(1 << cpuid);
		//irq_intc_ctrlmask_affinity(cpuid, 0);
		ipr_spinunlock(flags);
	}
#endif

	if (cause & CAUSEF_IP4)
		do_IRQ(IRQ_OST);

	if (pending & CAUSEF_IP2)
		intc_irq_dispatch(cpuid);
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned long pending;

	pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7)
		do_IRQ(ATH79_CPU_IRQ_TIMER);
	else if (pending & STATUSF_IP2) {
		ath79_ddr_wb_flush(ath79_ip2_flush_reg);
		do_IRQ(ATH79_CPU_IRQ_IP2);
	} else if (pending & STATUSF_IP4)
		do_IRQ(ATH79_CPU_IRQ_GE0);
	else if (pending & STATUSF_IP5)
		do_IRQ(ATH79_CPU_IRQ_GE1);
	else if (pending & STATUSF_IP3) {
		ath79_ddr_wb_flush(ath79_ip3_flush_reg);
		do_IRQ(ATH79_CPU_IRQ_USB);
	} else if (pending & STATUSF_IP6)
		do_IRQ(ATH79_CPU_IRQ_MISC);
	else
		spurious_interrupt();
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int pending;

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	pending = read_c0_cause() & read_c0_status();

#ifdef CONFIG_SIBYTE_BCM1480_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif
	if (pending & CAUSEF_IP4)
		do_IRQ(K_BCM1480_INT_TIMER_0 + cpu);
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		bcm1480_mailbox_interrupt();
#endif
	else if (pending & CAUSEF_IP2)
		dispatch_ip2();
}
asmlinkage void plat_irq_dispatch(void)
{
	u32 cause;

	do {
		cause = read_c0_cause() & read_c0_status() & ST0_IM;

		if (!cause)
			break;

		if (cause & CAUSEF_IP7)
			do_IRQ(7);
		if (cause & CAUSEF_IP2)
			dispatch_internal();
		if (!is_ext_irq_cascaded) {
			if (cause & CAUSEF_IP3)
				do_IRQ(IRQ_EXT_0);
			if (cause & CAUSEF_IP4)
				do_IRQ(IRQ_EXT_1);
			if (cause & CAUSEF_IP5)
				do_IRQ(IRQ_EXT_2);
			if (cause & CAUSEF_IP6)
				do_IRQ(IRQ_EXT_3);
		}
	} while (1);
}
void default_exept_handle(unsigned int *sp, unsigned int arg)
{
//	printf("Exception %d\n", arg);
//	printf("CAUSE=%08x EPC=%08x\n", read_c0_cause(), read_c0_epc());
//	printf("SP= %08x\n", sp);
//	printf("AT= %08x ra= %08x fp= %08x gp= %08x\n", sp[27], sp[0], sp[1], sp[2]);
//	printf("t9= %08x t8= %08x s7= %08x S6= %08x\n", sp[3], sp[4], sp[5], sp[6]);
//	printf("s5= %08x s4= %08x s3= %08x s2= %08x\n", sp[7], sp[8], sp[9], sp[10]);
//	printf("s1= %08x s0= %08x t7= %08x t6= %08x\n", sp[11], sp[12], sp[13], sp[14]);
//	printf("t5= %08x t4= %08x t3= %08x t2= %08x\n", sp[15], sp[16], sp[17], sp[18]);
//	printf("t1= %08x t0= %08x a3= %08x a2= %08x\n", sp[19], sp[20], sp[21], sp[22]);
//	printf("a1= %08x a0= %08x v1= %08x v0= %08x\n", sp[23], sp[24], sp[25], sp[26]);
//	printf("\n");
//	printf("c_except_handler: while(1)");
//	printf("\n");
//	while(1);

	__except_sp = sp;
	__except_arg = arg;
	__except_cause = read_c0_cause();
	__except_epc = read_c0_epc();

	write_32bit_cp0_register(CP0_EPC, _except_idle);
	__asm__ __volatile__("eret\n\t");
}
void rsv_ins_except(unsigned int arg)
{
	unsigned int cause;
	unsigned int epc;
	unsigned int *ins;
	unsigned int i;

	cause = read_c0_cause();
	epc = read_c0_epc();

	printf("Reserved Instruction Exception\n");
	printf("CAUSE=%08x EPC=%08x\n", cause, epc);

	ins = (unsigned int *)epc - 5;
	for (i = 0; i < 10; i++) {
		printf("%08x: %08x\n", ins, *ins++);
	}

	printf("\n");
	printf("c_except_handler: while(1)");
	printf("\n");

	while (1)
		;
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int cause = read_c0_cause();
	unsigned int pending = cause & read_c0_status() & ST0_IM;

	if (cause & CAUSEF_IP4)
		do_IRQ(IRQ_OST);

#ifdef CONFIG_SMP
	if (pending & CAUSEF_IP3)
		jzsoc_mbox_interrupt();
#endif

	if (pending & CAUSEF_IP2)
		intc_irq_dispatch();

	cause = read_c0_cause();
	pending = cause & read_c0_status() & ST0_IM;
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & CAUSEF_IP7)
		do_IRQ(5);
	else
		rt_irq_dispatch();
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & CAUSEF_IP7)
		mips_timer_interrupt();
	else
		rt2880_irqdispatch();
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending;

	pending = read_c0_cause() & read_c0_status() & ST0_IM;

	/* machine-specific plat_irq_dispatch */
	mach_irq_dispatch(pending);
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	/* Set compare to count to silence count/compare timer interrupts */
	write_c0_compare(read_c0_count());
#endif

	/*
	 * What a pain. We have to be really careful saving the upper 32 bits
	 * of any register across function calls if we don't want them
	 * trashed--since we're running in -o32, the calling routine never
	 * saves the full 64 bits of a register across a function call.  Being
	 * the interrupt handler, we're guaranteed that interrupts are disabled
	 * during this code so we don't have to worry about random interrupts
	 * blasting the high 32 bits.
	 */
	pending = read_c0_cause() & read_c0_status() & ST0_IM;

#ifdef CONFIG_SIBYTE_SB1250_PROF
	if (pending & CAUSEF_IP7)	/* Cpu performance counter interrupt */
		sbprof_cpu_intr();
	else
#endif
	if (pending & CAUSEF_IP4)
		sb1250_timer_interrupt();
#ifdef CONFIG_SMP
	else if (pending & CAUSEF_IP3)
		sb1250_mailbox_interrupt();
#endif
#ifdef CONFIG_KGDB
	else if (pending & CAUSEF_IP6)	/* KGDB (uart 1) */
		sb1250_kgdb_interrupt();
#endif
	else if (pending & CAUSEF_IP2) {
		unsigned long long mask;

		/*
		 * Default...we've hit an IP[2] interrupt, which means we've
		 * got to check the 1250 interrupt registers to figure out what
		 * to do.  Need to detect which CPU we're on, now that
		 * smp_affinity is supported.
		 */
		mask = __raw_readq(IOADDR(A_IMR_REGISTER(smp_processor_id(),
					  R_IMR_INTERRUPT_STATUS_BASE)));
		if (mask)
			do_IRQ(fls64(mask) - 1);
		else
			spurious_interrupt();
	} else
		spurious_interrupt();
}
u32 do_general_exception(arch_regs_t *uregs)
{
	u32 cp0_cause = read_c0_cause();
	u32 cp0_status = read_c0_status();
	mips32_entryhi_t ehi;
	u32 victim_asid;
	u32 victim_inst;
	struct vmm_vcpu *c_vcpu;
	u8 delay_slot_exception = IS_BD_SET(cp0_cause);

	ehi._entryhi = read_c0_entryhi();
	victim_asid = ehi._s_entryhi.asid >> ASID_SHIFT;
	c_vcpu = vmm_scheduler_current_vcpu();

	/*
	 * When the exception happens in the delay slot, we need to emulate
	 * the corresponding branch instruction first.  If it's one of the
	 * "likely" instructions, we don't need to emulate the faulting
	 * instruction, since "likely" instructions don't allow the slot to
	 * be executed if the branch is not taken.
	 */
	if (delay_slot_exception) {
		victim_inst = *((u32 *)(uregs->cp0_epc + 4));

		/*
		 * If this function returns zero, the branch instruction was a
		 * "likely" instruction and the branch wasn't taken.  So don't
		 * execute the delay slot, just return.  The correct EPC to
		 * return to will be programmed under our feet.
		 */
		if (!cpu_vcpu_emulate_branch_and_jump_inst(c_vcpu,
				*((u32 *)uregs->cp0_epc), uregs)) {
			return VMM_OK;
		}
	} else {
		victim_inst = *((u32 *)uregs->cp0_epc);
	}

	switch (EXCEPTION_CAUSE(cp0_cause)) {
	case EXEC_CODE_COPU:
		cpu_vcpu_emulate_cop_inst(c_vcpu, victim_inst, uregs);

		if (!delay_slot_exception)
			uregs->cp0_epc += 4;
		break;

	case EXEC_CODE_TLBL:
		if (CPU_IN_USER_MODE(cp0_status) &&
		    is_vmm_asid(ehi._s_entryhi.asid)) {
			ehi._s_entryhi.asid = (0x1 << ASID_SHIFT);
			write_c0_entryhi(ehi._entryhi);
			vmm_panic("CPU is in user mode and ASID is pointing to VMM!!\n");
		}
		break;
	}

	return VMM_OK;
}
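/*
 * A minimal sketch of the Cause-register fields the handler above depends on.
 * EXCEPTION_CAUSE() and IS_BD_SET() are that project's macros; the helpers
 * below only restate the architectural layout: ExcCode occupies Cause bits
 * [6:2] and the branch-delay flag is Cause bit 31.
 */
static inline unsigned int example_cause_exc_code(unsigned int cause)
{
	return (cause >> 2) & 0x1f;	/* Cause.ExcCode, bits 6..2 */
}

static inline int example_cause_in_delay_slot(unsigned int cause)
{
	return (cause >> 31) & 0x1;	/* Cause.BD, bit 31 */
}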
__IMEM asmlinkage void plat_irq_dispatch(void)
{
#ifdef CONFIG_MIPS_TC3262
	int irq = ((read_c0_cause() & ST0_IM) >> 10);

	do_IRQ(irq);
#else
	do_IRQ(VPint(CR_INTC_IVR));
#endif
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP7)		/* cpu timer */
		do_IRQ(7);
	else if (pending & STATUSF_IP2)		/* int0 hardware line */
		ar7_cascade();
	else
		spurious_interrupt();
}
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

	if (pending & STATUSF_IP2)
		do_IRQ(2);
	else if (pending & STATUSF_IP3)
		do_IRQ(3);
	else
		spurious_interrupt();
}