void dosoftint() { struct cpu_info *ci = curcpu(); int sir, q, mask; #ifdef MULTIPROCESSOR register_t sr; /* Enable interrupts */ sr = getsr(); ENABLEIPI(); __mp_lock(&kernel_lock); #endif while ((sir = ci->ci_softpending) != 0) { atomic_clearbits_int(&ci->ci_softpending, sir); for (q = SI_NQUEUES - 1; q >= 0; q--) { mask = SINTMASK(q); if (sir & mask) softintr_dispatch(q); } } #ifdef MULTIPROCESSOR __mp_unlock(&kernel_lock); setsr(sr); #endif }
/*
 * Interrupt dispatcher.
 *
 * Invoked with the CIU IP2 pending/enable pair for this CPU.  Reads the
 * summary and enable registers, masks everything found pending, services
 * the interrupts not blocked by the current spl, and re-enables what was
 * serviced.  Returns `hwpend' when the interrupt was ours, 0 otherwise.
 *
 * Fix: the function is declared uint32_t but previously fell off the end
 * without a return statement (undefined behavior); it now returns hwpend
 * as the other mips64 interrupt dispatchers do.
 */
uint32_t
obio_iointr(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	int cpuid = cpu_number();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc;
	uint64_t sum0 = CIU_IP2_SUM0(cpuid);
	uint64_t en0 = CIU_IP2_EN0(cpuid);

	isr = bus_space_read_8(&obio_tag, obio_h, sum0);
	imr = bus_space_read_8(&obio_tag, obio_h, en0);
	bit = 63;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	bus_space_write_8(&obio_tag, obio_h, en0, imr & ~isr);

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & obio_imask[cpuid][frame->ipl]) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		/* Snapshot the current ipl; restored after each handler. */
		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			/* Bits belonging to exactly this level. */
			tmpisr = isr & (obio_imask[cpuid][lvl] ^
			    obio_imask[cpuid][lvl - 1]);
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = (struct intrhand *)obio_intrhand[bitno];
				    ih != NULL; ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					if ((*ih->ih_fun)(ih->ih_arg) != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					/* Restore the pre-dispatch ipl. */
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");
				}
				/* No handler claimed this bit. */
				if (rc == 0)
					printf("spurious crime interrupt %d\n", bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		bus_space_write_8(&obio_tag, obio_h, en0, imr);
	}

	return hwpend;
}
/*
 * Handle an exception.
 * In the case of a kernel trap, we return the pc where to resume if
 * pcb_onfault is set, otherwise, return old pc.
 */
void
trap(struct trap_frame *trapframe)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;
	int type;

	/* Extract the exception code from the CP0 cause register copy. */
	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;

	/* On R8000, interrupts are too frequent to log unless debugging. */
#if defined(CPU_R8000) && !defined(DEBUG_INTERRUPT)
	if (type != T_INT)
#endif
		trapdebug_enter(ci, trapframe, -1);

	/* Syscalls (and R8000 interrupts) are counted elsewhere. */
#ifdef CPU_R8000
	if (type != T_INT && type != T_SYSCALL)
#else
	if (type != T_SYSCALL)
#endif
		atomic_add_int(&uvmexp.traps, 1);
	if (USERMODE(trapframe->sr)) {
		type |= T_USER;
		refreshcreds(p);
	}

	/*
	 * Enable hardware interrupts if they were on before the trap;
	 * enable IPI interrupts only otherwise.
	 */
	switch (type) {
#ifdef CPU_R8000
	case T_INT:
	case T_INT | T_USER:
#endif
	case T_BREAK:
		/* Keep interrupts off for these; T_BREAK may enter ddb. */
		break;
	default:
		if (ISSET(trapframe->sr, SR_INT_ENAB))
			enableintr();
		else {
#ifdef MULTIPROCESSOR
			ENABLEIPI();
#endif
		}
		break;
	}

#ifdef CPU_R8000
	/*
	 * Some exception causes on R8000 are actually detected by external
	 * circuitry, and as such are reported as external interrupts.
	 * On R8000 kernels, external interrupts vector to trap() instead of
	 * interrupt(), so that we can process these particular exceptions
	 * as if they were triggered as regular exceptions.
	 */
	if ((type & ~T_USER) == T_INT) {
		/*
		 * Similar reality check as done in interrupt(), in case
		 * an interrupt occured between a write to COP_0_STATUS_REG
		 * and it taking effect.
		 */
		if (!ISSET(trapframe->sr, SR_INT_ENAB))
			return;

		if (trapframe->cause & CR_VCE) {
#ifndef DEBUG_INTERRUPT
			trapdebug_enter(ci, trapframe, -1);
#endif
			panic("VCE or TLBX");
		}
		if (trapframe->cause & CR_FPE) {
			/* FP exceptions surface as interrupts here; redispatch. */
#ifndef DEBUG_INTERRUPT
			trapdebug_enter(ci, trapframe, -1);
#endif
			itsa(trapframe, ci, p, T_FPE | (type & T_USER));
			cp0_reset_cause(CR_FPE);
		}
		if (trapframe->cause & CR_INT_MASK)
			interrupt(trapframe);

		return;	/* no userret */
	} else
#endif
		/* Common path: itsa() performs the actual exception dispatch. */
		itsa(trapframe, ci, p, type);

	if (type & T_USER)
		userret(p);
}
/*
 * Interrupt handler for targets using the internal count register
 * as interval clock. Normally the system is run with the clock
 * interrupt always enabled. Masking is done here and if the clock
 * can not be run the tick is just counted and handled later when
 * the clock is logically unmasked again.
 */
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
	u_int32_t clkdiff;
	struct cpu_info *ci = curcpu();

	/*
	 * If we got an interrupt before we got ready to process it,
	 * retrigger it as far as possible. cpu_initclocks() will
	 * take care of retriggering it correctly.
	 */
	if (ci->ci_clock_started == 0) {
		cp0_set_compare(cp0_get_count() - 1);

		return CR_INT_5;
	}

	/*
	 * Count how many ticks have passed since the last clock interrupt...
	 * Note: the subtraction is done in unsigned 32-bit arithmetic, so
	 * counter wraparound is handled naturally.
	 */
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	while (clkdiff >= ci->ci_cpu_counter_interval) {
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
		ci->ci_pendingticks++;
	}
	/* ...plus the tick that caused this interrupt. */
	ci->ci_pendingticks++;
	ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;

	/*
	 * Set up next tick, and check if it has just been hit; in this
	 * case count it and schedule one tick ahead.
	 */
	cp0_set_compare(ci->ci_cpu_counter_last);
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	if ((int)clkdiff >= 0) {
		/* Deadline already passed; push it one interval further. */
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		ci->ci_pendingticks++;
		cp0_set_compare(ci->ci_cpu_counter_last);
	}

	/*
	 * Process clock interrupt unless it is currently masked.
	 * Otherwise the accumulated ticks are delivered later, when the
	 * clock is logically unmasked again.
	 */
	if (tf->ipl < IPL_CLOCK) {
#ifdef MULTIPROCESSOR
		register_t sr;

		/* Keep IPIs serviceable while running hardclock(). */
		sr = getsr();
		ENABLEIPI();
#endif
		while (ci->ci_pendingticks) {
			cp0_clock_count.ec_count++;
			hardclock(tf);
			ci->ci_pendingticks--;
		}
#ifdef MULTIPROCESSOR
		setsr(sr);
#endif
	}

	return CR_INT_5;	/* Clock is always on 5 */
}
/*
 * Interrupt dispatcher.
 *
 * Template body: the INTR_* macros are supplied by the including file
 * (mask registers access, per-bit handler lists, spurious reporting).
 * Pending interrupts are masked, those allowed at the current spl are
 * serviced from highest level down, then the serviced sources are
 * re-enabled.  Returns `hwpend' when the interrupt was ours, 0 otherwise.
 *
 * Fix: the function is declared uint32_t but previously fell off the end
 * without a return statement (undefined behavior); it now closes properly
 * and returns hwpend.
 */
uint32_t
INTR_FUNCTIONNAME(uint32_t hwpend, struct trap_frame *frame)
{
	struct cpu_info *ci = curcpu();
	uint64_t imr, isr, mask;
	int ipl;
	int bit;
	struct intrhand *ih;
	int rc, ret;
	INTR_LOCAL_DECLS

	INTR_GETMASKS;

	isr &= imr;
	if (isr == 0)
		return 0;	/* not for us */

	/*
	 * Mask all pending interrupts.
	 */
	INTR_MASKPENDING;

	/*
	 * If interrupts are spl-masked, mask them and wait for splx()
	 * to reenable them when necessary.
	 */
	if ((mask = isr & INTR_IMASK(frame->ipl)) != 0) {
		isr &= ~mask;
		imr &= ~mask;
	}

	/*
	 * Now process allowed interrupts.
	 */
	if (isr != 0) {
		int lvl, bitno;
		uint64_t tmpisr;

		/* Snapshot the current ipl; restored after each handler. */
		__asm__ (".set noreorder\n");
		ipl = ci->ci_ipl;
		__asm__ ("sync\n\t.set reorder\n");

		/* Service higher level interrupts first */
		for (lvl = NIPLS - 1; lvl != IPL_NONE; lvl--) {
			/* Bits belonging to exactly this level. */
			tmpisr = isr & (INTR_IMASK(lvl) ^ INTR_IMASK(lvl - 1));
			if (tmpisr == 0)
				continue;
			for (bitno = bit, mask = 1UL << bitno; mask != 0;
			    bitno--, mask >>= 1) {
				if ((tmpisr & mask) == 0)
					continue;

				rc = 0;
				for (ih = INTR_HANDLER(bitno); ih != NULL;
				    ih = ih->ih_next) {
#ifdef MULTIPROCESSOR
					u_int32_t sr;
#endif
#if defined(INTR_HANDLER_SKIP)
					if (INTR_HANDLER_SKIP(ih) != 0)
						continue;
#endif
					splraise(ih->ih_level);
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						sr = getsr();
						ENABLEIPI();
						if (ipl < IPL_SCHED)
							__mp_lock(&kernel_lock);
					}
#endif
					ret = (*ih->ih_fun)(ih->ih_arg);
					if (ret != 0) {
						rc = 1;
						atomic_add_uint64(&ih->ih_count.ec_count, 1);
					}
#ifdef MULTIPROCESSOR
					if (ih->ih_level < IPL_IPI) {
						if (ipl < IPL_SCHED)
							__mp_unlock(&kernel_lock);
						setsr(sr);
					}
#endif
					/* Restore the pre-dispatch ipl. */
					__asm__ (".set noreorder\n");
					ci->ci_ipl = ipl;
					__asm__ ("sync\n\t.set reorder\n");

					/* Handler claimed it exclusively. */
					if (ret == 1)
						break;
				}
				/* No handler claimed this bit. */
				if (rc == 0)
					INTR_SPURIOUS(bitno);

				isr ^= mask;
				if ((tmpisr ^= mask) == 0)
					break;
			}
		}

		/*
		 * Reenable interrupts which have been serviced.
		 */
		INTR_MASKRESTORE;
	}

	return hwpend;
}
/*
 * Handle an exception.
 * In the case of a kernel trap, we return the pc where to resume if
 * pcb_onfault is set, otherwise, return old pc.
 */
void
trap(struct trap_frame *trapframe)
{
	struct cpu_info *ci = curcpu();
	int type, i;
	unsigned ucode = 0;
	struct proc *p = ci->ci_curproc;
	vm_prot_t ftype;
	extern vaddr_t onfault_table[];
	int onfault;
	int typ = 0;
	union sigval sv;

	trapdebug_enter(ci, trapframe, -1);

	/* Extract the exception code from the CP0 cause register copy. */
	type = (trapframe->cause & CR_EXC_CODE) >> CR_EXC_CODE_SHIFT;
	if (USERMODE(trapframe->sr)) {
		type |= T_USER;
	}

	/*
	 * Enable hardware interrupts if they were on before the trap;
	 * enable IPI interrupts only otherwise.
	 * (T_BREAK keeps interrupts off since it may enter the debugger.)
	 */
	if (type != T_BREAK) {
		if (ISSET(trapframe->sr, SR_INT_ENAB))
			enableintr();
		else {
#ifdef MULTIPROCESSOR
			ENABLEIPI();
#endif
		}
	}

	switch (type) {
	case T_TLB_MOD:
		/* check for kernel address (KSEG addresses are negative) */
		if (trapframe->badvaddr < 0) {
			pt_entry_t *pte, entry;
			paddr_t pa;
			vm_page_t pg;

			pte = kvtopte(trapframe->badvaddr);
			entry = *pte;
#ifdef DIAGNOSTIC
			if (!(entry & PG_V) || (entry & PG_M))
				panic("trap: ktlbmod: invalid pte");
#endif
			if (pmap_is_page_ro(pmap_kernel(),
			    trunc_page(trapframe->badvaddr), entry)) {
				/* write to read only page in the kernel */
				ftype = VM_PROT_WRITE;
				goto kernel_fault;
			}
			/* Mark the page dirty and update the TLB entry. */
			entry |= PG_M;
			*pte = entry;
			KERNEL_LOCK();
			pmap_update_kernel_page(trapframe->badvaddr & ~PGOFSET, entry);
			pa = pfn_to_pad(entry);
			pg = PHYS_TO_VM_PAGE(pa);
			if (pg == NULL)
				panic("trap: ktlbmod: unmanaged page");
			pmap_set_modify(pg);
			KERNEL_UNLOCK();
			return;
		}
		/* FALLTHROUGH */
	case T_TLB_MOD+T_USER:
	    {
		pt_entry_t *pte, entry;
		paddr_t pa;
		vm_page_t pg;
		pmap_t pmap = p->p_vmspace->vm_map.pmap;

		if (!(pte = pmap_segmap(pmap, trapframe->badvaddr)))
			panic("trap: utlbmod: invalid segmap");
		pte += uvtopte(trapframe->badvaddr);
		entry = *pte;
#ifdef DIAGNOSTIC
		if (!(entry & PG_V) || (entry & PG_M))
			panic("trap: utlbmod: invalid pte");
#endif
		if (pmap_is_page_ro(pmap,
		    trunc_page(trapframe->badvaddr), entry)) {
			/* write to read only page */
			ftype = VM_PROT_WRITE;
			goto fault_common;
		}
		/* Mark the page dirty and update the user mapping. */
		entry |= PG_M;
		*pte = entry;
		KERNEL_LOCK();
		pmap_update_user_page(pmap, (trapframe->badvaddr & ~PGOFSET), entry);
		pa = pfn_to_pad(entry);
		pg = PHYS_TO_VM_PAGE(pa);
		if (pg == NULL)
			panic("trap: utlbmod: unmanaged page");
		pmap_set_modify(pg);
		KERNEL_UNLOCK();
		if (!USERMODE(trapframe->sr))
			return;
		goto out;
	    }

	case T_TLB_LD_MISS:
	case T_TLB_ST_MISS:
		ftype = (type == T_TLB_ST_MISS) ? VM_PROT_WRITE : VM_PROT_READ;
		/* check for kernel address */
		if (trapframe->badvaddr < 0) {
			vaddr_t va;
			int rv;

	kernel_fault:
			va = trunc_page((vaddr_t)trapframe->badvaddr);
			/* Clear onfault so a recursive fault panics cleanly. */
			onfault = p->p_addr->u_pcb.pcb_onfault;
			p->p_addr->u_pcb.pcb_onfault = 0;
			KERNEL_LOCK();
			rv = uvm_fault(kernel_map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			p->p_addr->u_pcb.pcb_onfault = onfault;
			if (rv == 0)
				return;
			if (onfault != 0) {
				/* Resume at the registered recovery pc. */
				p->p_addr->u_pcb.pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}
		/*
		 * It is an error for the kernel to access user space except
		 * through the copyin/copyout routines.
		 */
		if (p->p_addr->u_pcb.pcb_onfault != 0) {
			/*
			 * We want to resolve the TLB fault before invoking
			 * pcb_onfault if necessary.
			 */
			goto fault_common;
		} else {
			goto err;
		}

	case T_TLB_LD_MISS+T_USER:
		ftype = VM_PROT_READ;
		goto fault_common;

	case T_TLB_ST_MISS+T_USER:
		ftype = VM_PROT_WRITE;
fault_common:
	    {
		vaddr_t va;
		struct vmspace *vm;
		vm_map_t map;
		int rv;

		vm = p->p_vmspace;
		map = &vm->vm_map;
		va = trunc_page((vaddr_t)trapframe->badvaddr);

		onfault = p->p_addr->u_pcb.pcb_onfault;
		p->p_addr->u_pcb.pcb_onfault = 0;
		KERNEL_LOCK();

		rv = uvm_fault(map, trunc_page(va), 0, ftype);
		p->p_addr->u_pcb.pcb_onfault = onfault;

		/*
		 * If this was a stack access we keep track of the maximum
		 * accessed stack size. Also, if vm_fault gets a protection
		 * failure it is due to accessing the stack region outside
		 * the current limit and we need to reflect that as an access
		 * error.
		 */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (rv == 0)
				uvm_grow(p, va);
			else if (rv == EACCES)
				rv = EFAULT;
		}
		KERNEL_UNLOCK();
		if (rv == 0) {
			if (!USERMODE(trapframe->sr))
				return;
			goto out;
		}
		if (!USERMODE(trapframe->sr)) {
			/* Kernel-mode fault: honor pcb_onfault recovery. */
			if (onfault != 0) {
				p->p_addr->u_pcb.pcb_onfault = 0;
				trapframe->pc = onfault_table[onfault];
				return;
			}
			goto err;
		}

#ifdef ADEBUG
		printf("SIG-SEGV @%p pc %p, ra %p\n",
		    trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		ucode = ftype;
		i = SIGSEGV;
		typ = SEGV_MAPERR;
		break;
	    }

	case T_ADDR_ERR_LD+T_USER:	/* misaligned or kseg access */
	case T_ADDR_ERR_ST+T_USER:	/* misaligned or kseg access */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_ADRALN;
#ifdef ADEBUG
		printf("SIG-BUSA @%p pc %p, ra %p\n",
		    trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		break;
	case T_BUS_ERR_IFETCH+T_USER:	/* BERR asserted to cpu */
	case T_BUS_ERR_LD_ST+T_USER:	/* BERR asserted to cpu */
		ucode = 0;		/* XXX should be VM_PROT_something */
		i = SIGBUS;
		typ = BUS_OBJERR;
#ifdef ADEBUG
		printf("SIG-BUSB @%p pc %p, ra %p\n",
		    trapframe->badvaddr, trapframe->pc, trapframe->ra);
#endif
		break;

	case T_SYSCALL+T_USER:
	    {
		struct trap_frame *locr0 = p->p_md.md_regs;
		struct sysent *callp;
		unsigned int code;
		unsigned long tpc;
		int numsys;
		struct args {
			register_t i[8];
		} args;
		register_t rval[2];

		uvmexp.syscalls++;

		/* compute next PC after syscall instruction */
		tpc = trapframe->pc; /* Remember if restart */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0, trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
		callp = p->p_emul->e_sysent;
		numsys = p->p_emul->e_nsysent;
		/* Syscall number arrives in v0 per the mips64 ABI. */
		code = locr0->v0;
		switch (code) {
		case SYS_syscall:
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = locr0->a0;
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_argsize / sizeof(register_t);
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (i > 3) {
				/* Remaining register args; 8th arg on the stack. */
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				i = copyin((void *)locr0->sp, &args.i[7],
				    sizeof(register_t));
			}
			break;
		case SYS___syscall:
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = locr0->a0;
			args.i[0] = locr0->a1;
			args.i[1] = locr0->a2;
			args.i[2] = locr0->a3;
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			/* NOTE(review): divides by sizeof(int) here, unlike the
			 * sizeof(register_t) used for SYS_syscall — confirm
			 * against sys/kern whether this asymmetry is intended. */
			i = callp->sy_argsize / sizeof(int);
			if (i > 3) {
				args.i[3] = locr0->a4;
				args.i[4] = locr0->a5;
				args.i[5] = locr0->a6;
				args.i[6] = locr0->a7;
				i = copyin((void *)locr0->sp, &args.i[7],
				    sizeof(register_t));
			}
			break;
		default:
			if (code >= numsys)
				callp += p->p_emul->e_nosys; /* (illegal) */
			else
				callp += code;
			i = callp->sy_narg;
			args.i[0] = locr0->a0;
			args.i[1] = locr0->a1;
			args.i[2] = locr0->a2;
			args.i[3] = locr0->a3;
			if (i > 4) {
				args.i[4] = locr0->a4;
				args.i[5] = locr0->a5;
				args.i[6] = locr0->a6;
				args.i[7] = locr0->a7;
			}
		}
#ifdef SYSCALL_DEBUG
		KERNEL_LOCK();
		scdebug_call(p, code, args.i);
		KERNEL_UNLOCK();
#endif
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL)) {
			KERNEL_LOCK();
			ktrsyscall(p, code, callp->sy_argsize, args.i);
			KERNEL_UNLOCK();
		}
#endif
		rval[0] = 0;
		rval[1] = locr0->v1;
#if defined(DDB) || defined(DEBUG)
		/* Record the syscall code in the most recent trapdebug slot. */
		trapdebug[TRAPSIZE * ci->ci_cpuid + (trppos[ci->ci_cpuid] == 0 ?
		    TRAPSIZE : trppos[ci->ci_cpuid]) - 1].code = code;
#endif
#if NSYSTRACE > 0
		if (ISSET(p->p_flag, P_SYSTRACE)) {
			KERNEL_LOCK();
			i = systrace_redirect(code, p, args.i, rval);
			KERNEL_UNLOCK();
		} else
#endif
		{
			/* SY_NOLOCK syscalls run without the kernel lock. */
			int nolock = (callp->sy_flags & SY_NOLOCK);
			if (!nolock)
				KERNEL_LOCK();
			i = (*callp->sy_call)(p, &args, rval);
			if (!nolock)
				KERNEL_UNLOCK();
		}
		switch (i) {
		case 0:
			/* Success: return values in v0/v1, a3 cleared. */
			locr0->v0 = rval[0];
			locr0->v1 = rval[1];
			locr0->a3 = 0;
			break;

		case ERESTART:
			/* Rewind to re-execute the syscall instruction. */
			locr0->pc = tpc;
			break;

		case EJUSTRETURN:
			break;	/* nothing to do */

		default:
			/* Error: errno in v0, a3 set. */
			locr0->v0 = i;
			locr0->a3 = 1;
		}
#ifdef SYSCALL_DEBUG
		KERNEL_LOCK();
		scdebug_ret(p, code, i, rval);
		KERNEL_UNLOCK();
#endif
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSRET)) {
			KERNEL_LOCK();
			ktrsysret(p, code, i, rval[0]);
			KERNEL_UNLOCK();
		}
#endif
		goto out;
	    }

	case T_BREAK:
#ifdef DDB
		kdb_trap(type, trapframe);
#endif
		/* Reenable interrupts if necessary */
		if (trapframe->sr & SR_INT_ENAB) {
			enableintr();
		}
		return;

	case T_BREAK+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of break instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;

		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		switch ((instr & BREAK_VAL_MASK) >> BREAK_VAL_SHIFT) {
		case 6:	/* gcc range error */
			i = SIGFPE;
			typ = FPE_FLTSUB;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
		case 7:	/* gcc3 divide by zero */
			i = SIGFPE;
			typ = FPE_INTDIV;
			/* skip instruction */
			if (trapframe->cause & CR_BR_DELAY)
				locr0->pc = MipsEmulateBranch(locr0,
				    trapframe->pc, 0, 0);
			else
				locr0->pc += 4;
			break;
#ifdef PTRACE
		case BREAK_SSTEP_VAL:
			if (p->p_md.md_ss_addr == (long)va) {
#ifdef DEBUG
				printf("trap: %s (%d): breakpoint at %p "
				    "(insn %08x)\n",
				    p->p_comm, p->p_pid,
				    p->p_md.md_ss_addr, p->p_md.md_ss_instr);
#endif
				/* Restore original instruction and clear BP */
				process_sstep(p, 0);
				typ = TRAP_BRKPT;
			} else {
				typ = TRAP_TRACE;
			}
			i = SIGTRAP;
			break;
#endif
#ifdef FPUEMUL
		case BREAK_FPUEMUL_VAL:
			/*
			 * If this is a genuine FP emulation break,
			 * resume execution to our branch destination.
			 */
			if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
			    p->p_md.md_fppgva + 4 == (vaddr_t)va) {
				struct vm_map *map = &p->p_vmspace->vm_map;

				p->p_md.md_flags &= ~MDP_FPUSED;
				locr0->pc = p->p_md.md_fpbranchva;

				/*
				 * Prevent access to the relocation page.
				 * XXX needs to be fixed to work with rthreads
				 */
				uvm_fault_unwire(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE);
				(void)uvm_map_protect(map, p->p_md.md_fppgva,
				    p->p_md.md_fppgva + PAGE_SIZE,
				    UVM_PROT_NONE, FALSE);
				goto out;
			}
			/* FALLTHROUGH */
#endif
		default:
			typ = TRAP_TRACE;
			i = SIGTRAP;
			break;
		}
		break;
	    }

	case T_IWATCH+T_USER:
	case T_DWATCH+T_USER:
	    {
		caddr_t va;
		/* compute address of trapped instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		printf("watch exception @ %p\n", va);
#ifdef RM7K_PERFCNTR
		if (rm7k_watchintr(trapframe)) {
			/* Return to user, don't add any more overhead */
			goto out;
		}
#endif
		i = SIGTRAP;
		typ = TRAP_BRKPT;
		break;
	    }

	case T_TRAP+T_USER:
	    {
		caddr_t va;
		u_int32_t instr;
		struct trap_frame *locr0 = p->p_md.md_regs;

		/* compute address of trap instruction */
		va = (caddr_t)trapframe->pc;
		if (trapframe->cause & CR_BR_DELAY)
			va += 4;
		/* read break instruction */
		copyin(va, &instr, sizeof(int32_t));

		/* Advance past the trap instruction before delivering. */
		if (trapframe->cause & CR_BR_DELAY)
			locr0->pc = MipsEmulateBranch(locr0,
			    trapframe->pc, 0, 0);
		else
			locr0->pc += 4;
#ifdef RM7K_PERFCNTR
		if (instr == 0x040c0000) { /* Performance cntr trap */
			int result;

			result = rm7k_perfcntr(trapframe->a0, trapframe->a1,
			    trapframe->a2, trapframe->a3);
			locr0->v0 = -result;
			/* Return to user, don't add any more overhead */
			goto out;
		} else
#endif
		/*
		 * GCC 4 uses teq with code 7 to signal divide by
		 * zero at runtime. This is one instruction shorter
		 * than the BEQ + BREAK combination used by gcc 3.
		 */
		if ((instr & 0xfc00003f) == 0x00000034 /* teq */ &&
		    (instr & 0x001fffc0) == ((ZERO << 16) | (7 << 6))) {
			i = SIGFPE;
			typ = FPE_INTDIV;
		} else {
			i = SIGEMT;	/* Stuff it with something for now */
			typ = 0;
		}
		break;
	    }

	case T_RES_INST+T_USER:
		i = SIGILL;
		typ = ILL_ILLOPC;
		break;

	case T_COP_UNUSABLE+T_USER:
		/*
		 * Note MIPS IV COP1X instructions issued with FPU
		 * disabled correctly report coprocessor 1 as the
		 * unusable coprocessor number.
		 */
		if ((trapframe->cause & CR_COP_ERR) != 0x10000000) {
			i = SIGILL;	/* only FPU instructions allowed */
			typ = ILL_ILLOPC;
			break;
		}
#ifdef FPUEMUL
		MipsFPTrap(trapframe);
#else
		enable_fpu(p);
#endif
		goto out;

	case T_FPE:
		/* FP exception in kernel mode is fatal. */
		printf("FPU Trap: PC %x CR %x SR %x\n",
		    trapframe->pc, trapframe->cause, trapframe->sr);
		goto err;

	case T_FPE+T_USER:
		MipsFPTrap(trapframe);
		goto out;

	case T_OVFLOW+T_USER:
		i = SIGFPE;
		typ = FPE_FLTOVF;
		break;

	case T_ADDR_ERR_LD:	/* misaligned access */
	case T_ADDR_ERR_ST:	/* misaligned access */
	case T_BUS_ERR_LD_ST:	/* BERR asserted to cpu */
		if ((onfault = p->p_addr->u_pcb.pcb_onfault) != 0) {
			p->p_addr->u_pcb.pcb_onfault = 0;
			trapframe->pc = onfault_table[onfault];
			return;
		}
		goto err;

	default:
	err:
		/* Unhandled exception: dump state and panic. */
		disableintr();
#if !defined(DDB) && defined(DEBUG)
		trapDump("trap");
#endif
		printf("\nTrap cause = %d Frame %p\n", type, trapframe);
		printf("Trap PC %p RA %p fault %p\n",
		    trapframe->pc, trapframe->ra, trapframe->badvaddr);
#ifdef DDB
		stacktrace(!USERMODE(trapframe->sr) ?
		    trapframe : p->p_md.md_regs);
		kdb_trap(type, trapframe);
#endif
		panic("trap");
	}

#ifdef FPUEMUL
	/*
	 * If a relocated delay slot causes an exception, blame the
	 * original delay slot address - userland is not supposed to
	 * know anything about emulation bowels.
	 */
	if ((p->p_md.md_flags & MDP_FPUSED) != 0 &&
	    trapframe->badvaddr == p->p_md.md_fppgva)
		trapframe->badvaddr = p->p_md.md_fpslotva;
#endif
	/* Deliver the signal chosen above (i/ucode/typ) to the process. */
	p->p_md.md_regs->pc = trapframe->pc;
	p->p_md.md_regs->cause = trapframe->cause;
	p->p_md.md_regs->badvaddr = trapframe->badvaddr;
	sv.sival_ptr = (void *)trapframe->badvaddr;
	KERNEL_LOCK();
	trapsignal(p, i, ucode, typ, sv);
	KERNEL_UNLOCK();

out:
	/*
	 * Note: we should only get here if returning to user mode.
	 */
	userret(p);
}