/*
 * Save the current task's FPU state into the user-space signal frame.
 *
 * regs - trap-time registers; PSR_EF is cleared so the FPU is marked
 *        disabled for the task after the state has been saved.
 * fpu  - user-space destination for the FP registers, FSR and FP queue.
 *
 * Returns 0 on success, non-zero if any user copy faulted.
 *
 * Fix: the original text was corrupted by an HTML-entity mangling that
 * turned every "&current" into "¤t"; all such references are restored.
 */
static inline int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err = 0;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
		/* Enable the FPU so its state can be read out. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		regs->psr &= ~(PSR_EF);
		clear_tsk_thread_flag(current, TIF_USEDFPU);
	}
#else
	if (current == last_task_used_math) {
		/* Enable the FPU so its state can be read out. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		last_task_used_math = NULL;
		regs->psr &= ~(PSR_EF);
	}
#endif
	/* Copy the saved state out to the user signal frame. */
	err |= __copy_to_user(&fpu->si_float_regs[0],
			      &current->thread.float_regs[0],
			      (sizeof(unsigned long) * 32));
	err |= __put_user(current->thread.fsr, &fpu->si_fsr);
	err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	if (current->thread.fpqdepth != 0)
		err |= __copy_to_user(&fpu->si_fpqueue[0],
				      &current->thread.fpqueue[0],
				      ((sizeof(unsigned long) +
					(sizeof(unsigned long *))) * 16));
	clear_used_math();
	return err;
}
/*
 * Further secondary CPU initialization.
 *
 * We are now running on our startup stack, with proper page tables.
 * There is nothing to do but display some details about the CPU and its CMMUs.
 */
void
secondary_main()
{
	struct cpu_info *ci = curcpu();
	int s;

	cpu_configuration_print(0);
	ncpus++;

	/* Set up per-cpu scheduler state; no process is running here yet. */
	sched_init_cpu(ci);
	nanouptime(&ci->ci_schedstate.spc_runtime);
	ci->ci_curproc = NULL;
	/* Non-zero random seed for this cpu. */
	ci->ci_randseed = (arc4random() & 0x7fffffff) + 1;

	/*
	 * Release cpu_hatch_mutex to let other secondary processors
	 * have a chance to run.
	 */
	hatch_pending_count--;
	__cpu_simple_unlock(&cpu_hatch_mutex);

	/* wait for cpu_boot_secondary_processors() */
	__cpu_simple_lock(&cpu_boot_mutex);
	__cpu_simple_unlock(&cpu_boot_mutex);

	spl0();
	SCHED_LOCK(s);
	/* Enable interrupt delivery on this cpu. */
	set_psr(get_psr() & ~PSR_IND);

	SET(ci->ci_flags, CIF_ALIVE);

	/* Enter the scheduler; this call does not return. */
	cpu_switchto(NULL, sched_chooseproc());
}
/*
 * Invalidate the instruction cache for a physical address range on the
 * given CPU's instruction CMMU, using whole-page commands when the
 * range is page aligned and at least a page long, and line commands
 * otherwise.
 */
void
m8820x_icache_inv(cpuid_t cpu, paddr_t pa, psize_t size)
{
	u_int32_t saved_psr;
	psize_t chunk;

	/* Round the range out to cache line boundaries. */
	size = round_cache_line(pa + size) - trunc_cache_line(pa);
	pa = trunc_cache_line(pa);

	saved_psr = get_psr();
	set_psr(saved_psr | PSR_IND);	/* block interrupts */

	CMMU_LOCK;
	for (; size != 0; pa += chunk, size -= chunk) {
		if ((pa & PAGE_MASK) == 0 && size >= PAGE_SIZE) {
			/* Page aligned and at least a page left: go big. */
			m8820x_cmmu_set_cmd(CMMU_FLUSH_CACHE_INV_PAGE,
			    MODE_VAL, cpu, INST_CMMU, pa);
			chunk = PAGE_SIZE;
		} else {
			m8820x_cmmu_set_cmd(CMMU_FLUSH_CACHE_INV_LINE,
			    MODE_VAL, cpu, INST_CMMU, pa);
			chunk = MC88200_CACHE_LINE;
		}
		m8820x_cmmu_wait(cpu);
	}
	CMMU_UNLOCK;

	set_psr(saved_psr);
}
void flush_thread(void) { /* Make sure old user windows don't get in the way. */ flush_user_windows(); current->tss.w_saved = 0; current->tss.uwinmask = 0; current->tss.sig_address = 0; current->tss.sig_desc = 0; current->tss.sstk_info.cur_status = 0; current->tss.sstk_info.the_stack = 0; if(last_task_used_math == current) { /* Clean the fpu. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->tss.float_regs[0], ¤t->tss.fsr, ¤t->tss.fpqueue[0], ¤t->tss.fpqdepth); last_task_used_math = NULL; } memset(¤t->tss.reg_window[0], 0, (sizeof(struct dummy_reg_window) * NSWINS)); memset(¤t->tss.rwbuf_stkptrs[0], 0, (sizeof(unsigned long) * NSWINS)); /* Now, this task is no longer a kernel thread. */ current->tss.flags &= ~SPARC_FLAG_KTHREAD; }
/*
 * Return the PSR value of the given CPU context.
 *
 * get_psr() operates implicitly on the global `env', so the global is
 * temporarily swapped to env1 and restored before returning.
 */
target_ulong cpu_get_psr(CPUState *env1)
{
    CPUState *prev_env = env;
    target_ulong psr_value;

    env = env1;
    psr_value = get_psr();
    env = prev_env;     /* restore the caller's context */
    return psr_value;
}
/*
 * Invalidate all user-mode TLB entries on the given CPU's CMMUs.
 */
void
m8820x_tlb_inv_all(cpuid_t cpu)
{
	u_int32_t saved_psr = get_psr();

	set_psr(saved_psr | PSR_IND);	/* block interrupts */

	CMMU_LOCK;
	m8820x_cmmu_set_reg(CMMU_SCR, CMMU_FLUSH_USER_ALL, 0, cpu, 0);
	CMMU_UNLOCK;

	set_psr(saved_psr);		/* restore interrupt state */
}
/*
 * get_psr_at_epoch(): convert info from the pulsar database to the
 * "current" epoch.
 *
 * psrname - the pulsar name we are looking for (no J or B prefix)
 * epoch   - the time in question in MJD
 * psr     - output: the extrapolated parameters
 *
 * Returns the number of the pulsar in the database, or 0 when no
 * match was found.
 */
int get_psr_at_epoch(char *psrname, double epoch, psrparams * psr)
{
   int pnum;
   double dt, f0, f1;

   /* Look the pulsar up by name; bail out if it is not in the database. */
   pnum = psr_number_from_name(psrname);
   if (pnum < 0)
      return 0;

   get_psr(pnum, psr);

   /* Advance the spin parameters from the database epoch to `epoch'. */
   dt = SECPERDAY * (epoch - psr->timepoch);
   psr->timepoch = epoch;
   f0 = psr->f;
   f1 = psr->fd;
   psr->f = f0 + f1 * dt + 0.5 * psr->fdd * dt * dt;
   psr->fd = f1 + psr->fdd * dt;
   psr->p = 1.0 / psr->f;
   psr->pd = -psr->fd * psr->p * psr->p;
   psr->pdd = (2.0 * (f1 * f1) / f0 - psr->fdd) / (f0 * f0);

   /* Selected pulsar is binary: advance the orbital parameters too. */
   if (psr->orb.p != 0.0) {
      dt = SECPERDAY * (epoch - psr->orb.t);
      psr->orb.p = psr->orb.p * SECPERDAY + psr->orb.pd * dt;
      /* psr->orb.t is in seconds, _not_ MJD.  It represents the time */
      /* in sec _since_ the last periastron passage, _not_ when the   */
      /* next periastron will occur....                               */
      psr->orb.t = fmod(dt, psr->orb.p);
      if (psr->orb.t < 0.0)
         psr->orb.t += psr->orb.p;
      psr->orb.w = fmod((psr->orb.w + dt * psr->orb.wd / SECPERJULYR), 360.0);
   }

   /* Return the number of the pulsar in the database. */
   return pnum;
}
/*
 * Invalidate one TLB entry for `vaddr' on the given CPU's CMMUs,
 * in either the supervisor or user address space.
 */
void
m8820x_tlb_inv(cpuid_t cpu, u_int kernel, vaddr_t vaddr)
{
	u_int32_t saved_psr = get_psr();

	set_psr(saved_psr | PSR_IND);	/* block interrupts */

	CMMU_LOCK;
	m8820x_cmmu_set_cmd(kernel ?
	    CMMU_FLUSH_SUPER_PAGE : CMMU_FLUSH_USER_PAGE,
	    ADDR_VAL, cpu, 0, vaddr);
	CMMU_UNLOCK;

	set_psr(saved_psr);		/* restore interrupt state */
}
/* stack dump and information dump
 * for stack dump, a common rules is:
 * 0x2000xxxx (r7) 0x0000xxxx(lr), r7 will in stack and lr will in flash.
 * usually 12th long word is the address which calls panic().
 */
/*
 * Fatal-error handler: save FP state, print the panic message, dump
 * memory usage, then spin forever.  Does not return.
 */
void panic(char *infostr)
{
	uint32_t sp;
	uint32_t size;

	fsave();
	kprintf("PANIC: %s\n", infostr);

	/* NOTE(review): the stack/task dump below is compiled out. */
#if 0
	if(get_psr() & 0xFF){
		/* in exception context, dump exception stack */
		sp = __get_MSP();
		if((sp>(uint32_t)_irq_stack_start) && (sp<(uint32_t)_irq_stack_start+1024))
		{
			size = (uint32_t)_irq_stack_start+1024-sp;
			kprintf("exception stacks: sp=0x%x depth=%d bytes\n", sp, size);
			dump_buffer((uint8_t *)sp, size);
		}
		else
			kprintf("broken MSP: 0x%x\n", sp);
	}

	if((current>=&systask[0]) && (current<&systask[MAX_TASK_NUMBER]))
	{
		/* dump task stack */
		sp = __get_PSP();
		if((sp>(uint32_t)current->stack_base) && (sp<(uint32_t)current->stack_base+current->stack_size))
		{
			size = (uint32_t)current->stack_base+current->stack_size-sp;
			kprintf("task stacks: sp=0x%x depth=%d bytes\n", sp, size);
			dump_buffer((uint8_t *)sp, size);
		}
		else
			kprintf("broken PSP: 0x%x\n", sp);

		/* dump current task info */
		kprintf("current=0x%x last sp=0x%x stack_base=0x%x taskname=%s state=%d taskq=0x%x\n",
				current, current->sp, current->stack_base, current->name,
				current->state, current->taskq);
	}
	else
		kprintf("current is overwriten! current=0x%x\n", current);
#endif // 0

	/* dump system ready queue */

	/* dump memory usage */
	memory_dump();

	/* Halt here forever; the system is unrecoverable. */
	while(1);
}
/* * Free current thread data structures etc.. */ void exit_thread(void) { //flush_user_windows(); //printk("exit_thread %i\n",current->pid); if(last_task_used_math == current) { /* Keep process from leaving FPU in a bogon state. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->tss.float_regs[0], ¤t->tss.fsr, ¤t->tss.fpqueue[0], ¤t->tss.fpqdepth); last_task_used_math = NULL; } }
/*
 * Begin an atomic section: disable interrupts, remember and clear the
 * cross-processor interrupt control register, and take `lock'.
 * The previous control value is stored through `csr'; the previous PSR
 * is returned so the matching end routine can restore both.
 */
uint32_t
m197_mp_atomic_begin(__cpu_simple_lock_t *lock, uint *csr)
{
	uint32_t opsr = get_psr();

	set_psr(opsr | PSR_IND);	/* block interrupts */

	/* save, then mask, cross-processor interrupts */
	*csr = *(volatile uint8_t *)(BS_BASE + BS_CPINT);
	*(volatile uint8_t *)(BS_BASE + BS_CPINT) = 0;

	__cpu_simple_lock(lock);

	return opsr;
}
/*
 * Load the user area pointer register (UAPR) on the current CPU's CMMUs.
 */
void
m8820x_set_uapr(apr_t ap)
{
	int self = cpu_number();
	u_int32_t saved_psr = get_psr();

	set_psr(saved_psr | PSR_IND);	/* block interrupts */

	CMMU_LOCK;
	m8820x_cmmu_set_reg(CMMU_UAPR, ap, 0, self, 0);
	CMMU_UNLOCK;

	set_psr(saved_psr);		/* restore interrupt state */
}
/*
 * Set the interrupt priority level to `level' and return the previous
 * level.  The interrupt mask register is updated with interrupts
 * disabled to keep the read-modify-write atomic.
 */
u_int
m197_setipl(u_int level)
{
	u_int curspl, psr;

	psr = get_psr();
	set_psr(psr | PSR_IND);
	/* low three bits of the mask register hold the current level */
	curspl = *(u_int8_t *)M197_IMASK & 0x07;
	*(u_int8_t *)M197_IMASK = level;
	/*
	 * We do not flush the pipeline here, because interrupts are disabled,
	 * and set_psr() will synchronize the pipeline.
	 */
	set_psr(psr);
	return curspl;
}
/*
 * called at boot time, configure all devices on the system.
 */
void
cpu_configure()
{
	printf("bootpath: '%s' dev %u unit %u part %u\n",
	    bootargs, bootdev, bootunit, bootpart);

	softintr_init();

	if (config_rootfound("mainbus", "mainbus") == 0)
		panic("no mainbus found");

	/*
	 * Turn external interrupts on.
	 */
	set_psr(get_psr() & ~PSR_IND);
	spl0();
	/* autoconfiguration is complete */
	cold = 0;
}
/*
 * called at boot time, configure all devices on the system.
 */
void
cpu_configure()
{
	if (config_rootfound("mainbus", "mainbus") == 0)
		panic("no mainbus found");

	/*
	 * Turn external interrupts on.
	 *
	 * XXX We have a race here. If we enable interrupts after setroot(),
	 * the kernel dies.
	 */
	set_psr(get_psr() & ~PSR_IND);
	spl0();
	setroot();
	dumpconf();
	/* autoconfiguration is complete */
	cold = 0;
}
/*
 * Read a full pulsar database entry for pulsar `psrname' and return
 * the data through `psr'.  Exits the program when the pulsar cannot
 * be found in the database.
 */
void get_psrparams(psrparams * psr, char *psrname)
{
   int pnum;

   /* Lazily load the database on first use */
   if (!have_database)
      np = read_database();

   /* Locate the pulsar of interest */
   pnum = psr_number_from_name(psrname);
   if (pnum < 0) {
      printf("Could not find the PSR in the database in get_psrparams().\n");
      exit(2);
   }

   /* Fill the structure with data */
   get_psr(pnum, psr);
}
/*
 * called at boot time, configure all devices on the system.
 */
void
cpu_configure()
{
	softintr_init();

	if (config_rootfound("mainbus", "mainbus") == 0)
		panic("no mainbus found");

	/*
	 * Switch to our final trap vectors, and unmap the PROM data area.
	 */
	set_vbr(kernel_vbr);
	pmap_unmap_firmware();

	/* autoconfiguration is complete */
	cold = 0;

	/*
	 * Turn external interrupts on.
	 */
	set_psr(get_psr() & ~PSR_IND);
	spl0();
}
void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { /* Sanity check... */ if(psr & PSR_PS) die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs); put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */ regs->psr |= PSR_EF; #ifndef CONFIG_SMP if(last_task_used_math == current) return; if(last_task_used_math) { /* Other processes fpu state, save away */ struct task_struct *fptask = last_task_used_math; fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr, &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth); } last_task_used_math = current; if(current->used_math) { fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); } else { /* Set initial sane state. */ fpload(&init_fregs[0], &init_fsr); current->used_math = 1; } #else if(!current->used_math) { fpload(&init_fregs[0], &init_fsr); current->used_math = 1; } else { fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); } current->flags |= PF_USEDFPU; #endif }
/*
 * Device interrupt handler for MVME197
 */
void
m197_intr(struct trapframe *eframe)
{
	u_int32_t psr;
	int level;
	struct intrhand *intr;
	intrhand_t *list;
	int ret;
	vaddr_t ivec;
	u_int8_t vec;

#ifdef MULTIPROCESSOR
	if (eframe->tf_mask < IPL_SCHED)
		__mp_lock(&kernel_lock);
#endif

	uvmexp.intrs++;

	level = *(u_int8_t *)M197_ILEVEL & 0x07;
	/* generate IACK and get the vector */
	ivec = M197_IACK + (level << 2) + 0x03;
	vec = *(volatile u_int8_t *)ivec;

	/* block interrupts at level or lower */
	m197_setipl(level);
	/* re-enable interrupt delivery for higher levels while we dispatch */
	psr = get_psr();
	set_psr(psr & ~PSR_IND);

	list = &intr_handlers[vec];
	if (SLIST_EMPTY(list))
		printf("Spurious interrupt (level %x and vec %x)\n",
		    level, vec);

	/*
	 * Walk through all interrupt handlers in the chain for the
	 * given vector, calling each handler in turn, till some handler
	 * returns a value != 0.
	 */
	ret = 0;
	SLIST_FOREACH(intr, list, ih_link) {
		if (intr->ih_wantframe != 0)
			ret = (*intr->ih_fn)((void *)eframe);
		else
			ret = (*intr->ih_fn)(intr->ih_arg);
		if (ret != 0) {
			intr->ih_count.ec_count++;
			break;
		}
	}

	if (ret == 0) {
		printf("Unclaimed interrupt (level %x and vec %x)\n",
		    level, vec);
	}
#if 0
	/*
	 * Disable interrupts before returning to assembler,
	 * the spl will be restored later.
	 */
	set_psr(psr | PSR_IND);
#endif

#ifdef MULTIPROCESSOR
	if (eframe->tf_mask < IPL_SCHED)
		__mp_unlock(&kernel_lock);
#endif
}
/*
 * m88100 trap dispatcher: classify the trap `type', resolve kernel and
 * user faults through uvm_fault(), and post signals for user-mode
 * exceptions.  Interrupts and DDB entries return directly; user traps
 * fall through to userret().
 */
void
m88100_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	unsigned fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;
	extern struct vm_map *kernel_map;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_INT:
	case T_INT+T_USER:
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_INT, frame);
		curcpu()->ci_intrdepth--;
		return;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @ 0x%08x\n",
		    frame->tf_sxip);
		panictrap(frame->tf_vector, frame);
		break;

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		panictrap(frame->tf_vector, frame);
		break;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			type = T_DATAFLT + T_USER;
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((unsigned *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_PROC_LOCK(p);
		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* grow the stack if the faulting address is above it */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_DATAFLT+T_USER) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data unit
				 * pipe line and reset dmt0 so that trap won't
				 * get called again.
				 */
				data_access_emulation((unsigned *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			} else {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed. When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}
#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging. When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		uvmexp.softs++;
		p->p_md.md_astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_PROC_LOCK(p);
			ADDUPROF(p);
			KERNEL_PROC_UNLOCK(p);
		}
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
/* * Free current thread data structures etc.. */ void exit_thread(void) { #ifndef CONFIG_SMP if(last_task_used_math == current) { #else if(current->flags & PF_USEDFPU) { #endif /* Keep process from leaving FPU in a bogon state. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); #ifndef CONFIG_SMP last_task_used_math = NULL; #else current->flags &= ~PF_USEDFPU; #endif } } void flush_thread(void) { current->thread.w_saved = 0; /* No new signal delivery by default */ current->thread.new_signal = 0; #ifndef CONFIG_SMP if(last_task_used_math == current) { #else if(current->flags & PF_USEDFPU) { #endif /* Clean the fpu. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); #ifndef CONFIG_SMP last_task_used_math = NULL; #else current->flags &= ~PF_USEDFPU; #endif } /* Now, this task is no longer a kernel thread. */ current->thread.current_ds = USER_DS; if (current->thread.flags & SPARC_FLAG_KTHREAD) { current->thread.flags &= ~SPARC_FLAG_KTHREAD; /* We must fixup kregs as well. */ current->thread.kregs = (struct pt_regs *) (((unsigned long)current) + (TASK_UNION_SIZE - TRACEREG_SZ)); } } static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src) { __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t" "ldd\t[%1 + 0x08], %%g4\n\t" "ldd\t[%1 + 0x10], %%o4\n\t" "std\t%%g2, [%0 + 0x00]\n\t" "std\t%%g4, [%0 + 0x08]\n\t" "std\t%%o4, [%0 + 0x10]\n\t" "ldd\t[%1 + 0x18], %%g2\n\t" "ldd\t[%1 + 0x20], %%g4\n\t" "ldd\t[%1 + 0x28], %%o4\n\t" "std\t%%g2, [%0 + 0x18]\n\t" "std\t%%g4, [%0 + 0x20]\n\t" "std\t%%o4, [%0 + 0x28]\n\t" "ldd\t[%1 + 0x30], %%g2\n\t" "ldd\t[%1 + 0x38], %%g4\n\t" "ldd\t[%1 + 0x40], %%o4\n\t" "std\t%%g2, [%0 + 0x30]\n\t" "std\t%%g4, [%0 + 0x38]\n\t" "ldd\t[%1 + 0x48], %%g2\n\t" "std\t%%o4, [%0 + 0x40]\n\t" "std\t%%g2, [%0 + 0x48]\n\t" : : "r" (dst), "r" (src) : "g2", "g3", "g4", "g5", "o4", "o5"); }
/*
 * m88110 trap dispatcher: classify the trap `type', resolve kernel and
 * user faults through uvm_fault(), handle the 88110's write-fault
 * modified-bit emulation, and post signals for user-mode exceptions.
 */
void
m88110_trap(unsigned type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	unsigned fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;
	pt_entry_t *pte;
	extern struct vm_map *kernel_map;
	extern pt_entry_t *pmap_pte(pmap_t, vaddr_t);

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = 0;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_110_DRM+T_USER:
	case T_110_DRM:
#ifdef DEBUG
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_110_DWM+T_USER:
	case T_110_DWM:
#ifdef DEBUG
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/
	case T_110_IAM+T_USER:
	case T_110_IAM:
#ifdef DEBUG
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip one instruction */
		if (frame->tf_exip & 1)
			frame->tf_exip = frame->tf_enip;
		else
			frame->tf_exip += 4;
		splx(s);
		return;
#if 0
	case T_ILLFLT:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
		    "error fault", (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* 0 */
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;
	case T_NON_MASK:
	case T_NON_MASK+T_USER:
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_NON_MASK, frame);
		curcpu()->ci_intrdepth--;
		return;
	case T_INT:
	case T_INT+T_USER:
		curcpu()->ci_intrdepth++;
		md_interrupt_func(T_INT, frame);
		curcpu()->ci_intrdepth--;
		return;
	case T_MISALGNFLT:
		printf("kernel mode misaligned access exception @ 0x%08x\n",
		    frame->tf_exip);
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			type = T_DATAFLT + T_USER;
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			frame->tf_dsr &= ~CMMU_DSR_WE;	/* undefined */
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		if (frame->tf_dsr & CMMU_DSR_WE) {	/* write fault */
			/*
			 * This could be a write protection fault or an
			 * exception to set the used and modified bits
			 * in the pte. Basically, if we got a write error,
			 * then we already have a pte entry that faulted
			 * in from a previous seg fault or page fault.
			 * Get the pte and check the status of the
			 * modified and valid bits to determine if this
			 * indeed a real write fault. XXX smurph
			 */
			pte = pmap_pte(map->pmap, va);
#ifdef DEBUG
			if (pte == NULL) {
				KERNEL_UNLOCK();
				panic("NULL pte on write fault??");
			}
#endif
			if (!(*pte & PG_M) && !(*pte & PG_RO)) {
				/* Set modified bit and try the write again. */
#ifdef TRAPDEBUG
				printf("Corrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				*pte |= PG_M;
				KERNEL_UNLOCK();
				return;
#if 1	/* shouldn't happen */
			} else {
				/* must be a real wp fault */
#ifdef TRAPDEBUG
				printf("Uncorrected kernel write fault, map %x pte %x\n",
				    map->pmap, *pte);
#endif
				if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
					p->p_addr->u_pcb.pcb_onfault = 0;
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				if (result == 0) {
					KERNEL_UNLOCK();
					return;
				}
#endif
			}
		}
		KERNEL_UNLOCK();
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_PROC_LOCK(p);
		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_DATAFLT+T_USER) {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				pte = pmap_pte(vm_map_pmap(map), va);
#ifdef DEBUG
				if (pte == NULL) {
					KERNEL_PROC_UNLOCK(p);
					panic("NULL pte on write fault??");
				}
#endif
				if (!(*pte & PG_M) && !(*pte & PG_RO)) {
					/*
					 * Set modified bit and try the
					 * write again.
					 */
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					*pte |= PG_M;
					/*
					 * invalidate ATCs to force
					 * table search
					 */
					set_dcmd(CMMU_DCMD_INV_UATC);
					KERNEL_PROC_UNLOCK(p);
					return;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, map %x pte %x\n",
					    map->pmap, *pte);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_PROC_UNLOCK(p);
				panictrap(frame->tf_vector, frame);
			}
		} else {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_PROC_UNLOCK(p);
				panictrap(frame->tf_vector, frame);
			}
		}

		/* grow the stack if the faulting address is above it */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed. When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging. When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	case T_ASTFLT+T_USER:
		uvmexp.softs++;
		p->p_md.md_astpending = 0;
		if (p->p_flag & P_OWEUPC) {
			KERNEL_PROC_LOCK(p);
			ADDUPROF(p);
			KERNEL_PROC_UNLOCK(p);
		}
		if (curcpu()->ci_want_resched)
			preempt(NULL);
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_int = fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
	}

	userret(p);
}
/*
 * Send a "complex" (argument-carrying) inter-processor interrupt to the
 * given cpu on an MVME197 board.
 *
 * Interrupts are disabled (PSR_IND) around the whole exchange.  The sender
 * spins until the target's IPI status bit clears, stores the two argument
 * words and the ipi bit in the target's cpu_info, then raises the IPI via
 * the BusSwitch CPINT register and spins again until the target acknowledges.
 * Panics if either wait times out.
 *
 * NOTE(review): the code relies on there being at most two processors on
 * the board (see inline comment below) to avoid atomic operations on
 * ci_ipi — confirm before reusing on other configurations.
 */
void m197_send_complex_ipi(int ipi, cpuid_t cpu, u_int32_t arg1, u_int32_t arg2)
{
	struct cpu_info *ci = &m88k_cpus[cpu];
	uint32_t psr;
	int wait;

	/* Target cpu must have been brought up before it can take IPIs. */
	if ((ci->ci_flags & CIF_ALIVE) == 0)
		return;	/* XXX not ready yet */

	if (ci->ci_ddb_state == CI_DDB_PAUSE)
		return;	/* XXX skirting deadlock */

	/* Block interrupts on this cpu for the duration of the handshake. */
	psr = get_psr();
	set_psr(psr | PSR_IND);

	/*
	 * Wait for the other processor to be ready to accept an IPI.
	 */
	for (wait = 1000000; wait != 0; wait--) {
		if (!ISSET(*(volatile u_int8_t *)(BS_BASE + BS_CPINT),
		    BS_CPI_STAT))
			break;
	}
	if (wait == 0)
		panic("couldn't send complex ipi %x to cpu %d: busy",
		    ipi, cpu);
#ifdef DEBUG
	if (ci->ci_ipi != 0)
		printf("%s: cpu %d ipi %x did not clear during wait\n",
		    __func__, ci->ci_cpuid, ci->ci_ipi);
#endif

	/*
	 * In addition to the ipi bit itself, we need to set up ipi arguments.
	 * Note that we do not need to protect against another processor
	 * trying to send another complex IPI, since we know there are only
	 * two processors on the board. This is also why we do not use atomic
	 * operations on ci_ipi there, since we know from the loop above that
	 * the other process is done doing any IPI work.
	 */
	ci->ci_ipi_arg1 = arg1;
	ci->ci_ipi_arg2 = arg2;
	ci->ci_ipi |= ipi;

	/* Trigger the cross-processor interrupt. */
	*(volatile u_int8_t *)(BS_BASE + BS_CPINT) |= BS_CPI_SCPI;

	/*
	 * Wait for the other processor to complete ipi processing.
	 */
	for (wait = 1000000; wait != 0; wait--) {
		if (!ISSET(*(volatile u_int8_t *)(BS_BASE + BS_CPINT),
		    BS_CPI_STAT))
			break;
	}
	if (wait == 0)
		panic("couldn't send complex ipi %x to cpu %d: no ack",
		    ipi, cpu);

#ifdef DEBUG
	/*
	 * If there are any simple IPIs pending, trigger them now.
	 * There really shouldn't any, since we have waited for all
	 * asynchronous ipi processing to complete before sending this
	 * one.
	 */
	if (ci->ci_ipi != 0) {
		printf("%s: cpu %d ipi %x did not clear after completion\n",
		    __func__, ci->ci_cpuid, ci->ci_ipi);
		*(volatile u_int8_t *)(BS_BASE + BS_CPINT) |= BS_CPI_SCPI;
	}
#endif

	/* Restore the caller's interrupt state. */
	set_psr(psr);
}
/*
 * Trap handler for the MC88110 processor.
 *
 * Dispatches on the trap `type': applies the errata #16 exip fixup for
 * instruction faults on page boundaries, resolves kernel and user memory
 * faults through uvm_fault() (honoring pcb_onfault for copyin/copyout
 * recovery), services debugger/breakpoint traps, and converts the
 * remaining user-mode traps into signals delivered via trapsignal().
 * Supervisor-mode traps that cannot be resolved end up in panictrap().
 */
void m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	/* Early traps may occur before curproc is set up. */
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)fault_addr;
			if (instr == 0xf400cc01)
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}

	/* Errata #16 user-mode jsr.n case: deliver SIGILL right away. */
	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	/* Kernel debugger entry points: run with interrupts enabled in DDB. */
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */

	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *  	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address.  I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2,  1f
		 *	 or	r25, r0,  r22
		 *	bsr	somewhere
		 *	or	r25, r0,  r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear. Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well. (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_icache_inv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_cache_wbinv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_exip);
		goto lose;

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		/* RW bit set means this was a read access. */
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_exip = pcb_onfault;
				/*
				 * Continue as if the fault had been resolved.
				 */
				result = 0;
			}

			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;

	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		}
		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* Grow the stack region if the fault lies within it. */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_UNLOCK();

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;

	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;

	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;

	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;

	case T_FPEPFLT+T_USER:
		/* Floating point exceptions are handled (and signals
		 * delivered, if need be) by the dedicated FPU code. */
		m88110_fpu_exception(frame);
		goto userexit;

	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;

	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
	}

userexit:
	userret(p);
}
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { static int calls; siginfo_t info; unsigned long fsr; int ret = 0; #ifndef CONFIG_SMP struct task_struct *fpt = last_task_used_math; #else struct task_struct *fpt = current; #endif put_psr(get_psr() | PSR_EF); /* If nobody owns the fpu right now, just clear the * error into our fake static buffer and hope it don't * happen again. Thank you crashme... */ #ifndef CONFIG_SMP if(!fpt) { #else if(!(fpt->flags & PF_USEDFPU)) { #endif fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); regs->psr &= ~PSR_EF; return; } fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr, &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth); #ifdef DEBUG_FPU printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr); #endif switch ((fpt->thread.fsr & 0x1c000)) { /* switch on the contents of the ftt [floating point trap type] field */ #ifdef DEBUG_FPU case (1 << 14): printk("IEEE_754_exception\n"); break; #endif case (2 << 14): /* unfinished_FPop (underflow & co) */ case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */ ret = do_mathemu(regs, fpt); break; #ifdef DEBUG_FPU case (4 << 14): printk("sequence_error (OS bug...)\n"); break; case (5 << 14): printk("hardware_error (uhoh!)\n"); break; case (6 << 14): printk("invalid_fp_register (user error)\n"); break; #endif /* DEBUG_FPU */ } /* If we successfully emulated the FPop, we pretend the trap never happened :-> */ if (ret) { fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); return; } /* nope, better SIGFPE the offending process... */ #ifdef CONFIG_SMP fpt->flags &= ~PF_USEDFPU; #endif if(psr & PSR_PS) { /* The first fsr store/load we tried trapped, * the second one will not (we hope). */ printk("WARNING: FPU exception from kernel mode. 
at pc=%08lx\n", regs->pc); regs->pc = regs->npc; regs->npc += 4; calls++; if(calls > 2) die_if_kernel("Too many Penguin-FPU traps from kernel mode", regs); return; } fsr = fpt->thread.fsr; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void *)pc; info.si_trapno = 0; info.si_code = __SI_FAULT; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; else if (fsr & 0x08) info.si_code = FPE_FLTOVF; else if (fsr & 0x04) info.si_code = FPE_FLTUND; else if (fsr & 0x02) info.si_code = FPE_FLTDIV; else if (fsr & 0x01) info.si_code = FPE_FLTRES; } send_sig_info(SIGFPE, &info, fpt); #ifndef CONFIG_SMP last_task_used_math = NULL; #endif regs->psr &= ~PSR_EF; if(calls > 0) calls=0; } void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; if(psr & PSR_PS) die_if_kernel("Penguin overflow trap from kernel mode", regs); info.si_signo = SIGEMT; info.si_errno = 0; info.si_code = EMT_TAGOVF; info.si_addr = (void *)pc; info.si_trapno = 0; send_sig_info(SIGEMT, &info, current); }
void mc88410_wb_page(paddr_t physaddr) { paddr_t xccaddr = XCC_ADDR | (physaddr >> PGSHIFT); u_int psr; u_int16_t bs_gcsr, bs_romcr; bs_gcsr = *(volatile u_int16_t *)(BS_BASE + BS_GCSR); bs_romcr = *(volatile u_int16_t *)(BS_BASE + BS_ROMCR); /* * Since the page number is unlikely to be a multiple of 4, we need * to mask misaligned exceptions. */ set_psr((psr = get_psr()) | PSR_MXM); /* clear WEN0 and WEN1 in ROMCR (disables writes to FLASH) */ *(volatile u_int16_t *)(BS_BASE + BS_ROMCR) = bs_romcr & ~(BS_ROMCR_WEN0 | BS_ROMCR_WEN1); /* set XCC bit in GCSR (0xff8xxxxx now decodes to mc88410) */ *(volatile u_int16_t *)(BS_BASE + BS_GCSR) = bs_gcsr | BS_GCSR_XCC; /* send command */ __asm__ __volatile__ ( "or r2, r0, %0\n\t" "or r3, r0, r0\n\t" "st.d r2, %1, 0" : : "i" (XCC_WB_PAGE), "r" (xccaddr) : "r2", "r3"); /* spin until the operation is complete */
/*
 * Set up the saved kernel state of a newly forked child task.
 *
 * Copies the parent's trap frame and the stack frame below it onto the
 * child's kernel stack page, points the child's saved kernel registers
 * (ksp/kpc/kpsr/kwim/kregs) at them so the child resumes in ret_sys_call,
 * and arranges the fork() return values: the child sees the parent's pid
 * with %i1 == 1, the parent sees %i1 == 0.
 *
 * Kernel-mode forks (PSR_PS set in regs->psr) become kernel threads using
 * a frame on the kernel stack page; user forks get their user stack frame
 * cloned via clone_stackframe().
 *
 * NOTE(review): `nr' and `clone_flags' are unused here — presumably kept
 * for interface compatibility with other architectures; confirm.
 */
void copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		 struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct sparc_stackf *old_stack, *new_stack;
	unsigned long stack_offset;

	//flush_user_windows();
	//printk ("copy_thread\n");
	//show_regs(regs);

	/* If the parent owns the FPU, save its FPU state into the child. */
	if(last_task_used_math == current) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&p->tss.float_regs[0], &p->tss.fsr,
		       &p->tss.fpqueue[0], &p->tss.fpqdepth);
	}

	/* Calculate offset to stack_frame & pt_regs */
	stack_offset = ((PAGE_SIZE ) - TRACEREG_SZ);

	/*
	 * p->kernel_stack_page   new_stack   childregs
	 * !                      !           !              {if(PSR_PS) }
	 * V                      V (stk.fr.) V  (pt_regs)   { (stk.fr.) }
	 * +----- - - - - - ------+===========+============={+==========}+
	 */
	if(regs->psr & PSR_PS)
		stack_offset -= REGWIN_SZ;
	childregs = ((struct pt_regs *) (p->kernel_stack_page + stack_offset));
	*childregs = *regs;
	new_stack = (((struct sparc_stackf *) childregs) - 1);
	old_stack = (((struct sparc_stackf *) regs) - 1);
	*new_stack = *old_stack;
	p->tss.ksp = p->saved_kernel_stack = (unsigned long) new_stack;
	/* Child resumes at ret_sys_call (minus the usual pc bias). */
	p->tss.kpc = (((unsigned long) ret_sys_call) - 0x8);
	p->tss.kpsr = current->tss.fork_kpsr;
	p->tss.kwim = current->tss.fork_kwim;
	p->tss.kregs = childregs;
	childregs->u_regs[UREG_FP] = sp;

	if(regs->psr & PSR_PS) {
		/* Kernel thread: frame pointer stays on the kernel stack. */
		stack_offset += TRACEREG_SZ;
		childregs->u_regs[UREG_FP] = p->kernel_stack_page + stack_offset;
		p->tss.flags |= SPARC_FLAG_KTHREAD;
	} else {
		struct sparc_stackf *childstack;
		struct sparc_stackf *parentstack;

		p->tss.flags &= ~SPARC_FLAG_KTHREAD;
		/* User stacks are 8-byte aligned. */
		childstack = (struct sparc_stackf *) (sp & ~0x7UL);
		parentstack = (struct sparc_stackf *) regs->u_regs[UREG_FP];

		if (childstack == parentstack) {
			//adapt the copy depth when after fork() parent pushes more stack frames.
			childstack = clone_stackframe(childstack, parentstack,3,1024);
		} else {
			childstack = clone_stackframe(childstack, parentstack,3,0);
		}

		childregs->u_regs[UREG_FP] = (unsigned long)childstack;

/*
		printk("Parent stack\n");
		__show_backtrace(parentstack);
		printk("Child stack\n");
		__show_backtrace(childstack);
*/
	}

	/* Set the return value for the child. */
	childregs->u_regs[UREG_I0] = current->pid;
	childregs->u_regs[UREG_I1] = 1;

	/* Set the return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

/*
	printk("Parent: (%i)\n",current->pid);
	show_regs(regs);
	printk("Child: (%i)\n",p->pid);
	show_regs(childregs);
*/
}
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { static calls = 0; #ifndef __SMP__ struct task_struct *fpt = last_task_used_math; #else struct task_struct *fpt = current; #endif put_psr(get_psr() | PSR_EF); /* If nobody owns the fpu right now, just clear the * error into our fake static buffer and hope it don't * happen again. Thank you crashme... */ #ifndef __SMP__ if(!fpt) { #else if(!(fpt->flags & PF_USEDFPU)) { #endif fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); regs->psr &= ~PSR_EF; return; } fpsave(&fpt->tss.float_regs[0], &fpt->tss.fsr, &fpt->tss.fpqueue[0], &fpt->tss.fpqdepth); fpt->tss.sig_address = pc; fpt->tss.sig_desc = SUBSIG_FPERROR; /* as good as any */ #ifdef __SMP__ fpt->flags &= ~PF_USEDFPU; #endif if(psr & PSR_PS) { /* The first fsr store/load we tried trapped, * the second one will not (we hope). */ printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n", regs->pc); regs->pc = regs->npc; regs->npc += 4; calls++; if(calls > 2) die_if_kernel("Too many Penguin-FPU traps from kernel mode", regs); return; } send_sig(SIGFPE, fpt, 1); #ifndef __SMP__ last_task_used_math = NULL; #endif regs->psr &= ~PSR_EF; if(calls > 0) calls=0; } void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { if(psr & PSR_PS) die_if_kernel("Penguin overflow trap from kernel mode", regs); current->tss.sig_address = pc; current->tss.sig_desc = SUBSIG_TAG; /* as good as any */ send_sig(SIGEMT, current, 1); } void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { #ifdef TRAP_DEBUG printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif if(psr & PSR_PS) panic("Tell me what a watchpoint trap is, and I'll then deal " "with such a beast..."); }
/*
 * Perform the cache maintenance required around a DMA transfer on the
 * physical range [_pa, _pa + _size): writeback (DMA_CACHE_SYNC),
 * writeback+invalidate (DMA_CACHE_SYNC_INVAL) or invalidate
 * (DMA_CACHE_INV).
 *
 * The range is extended to cache line boundaries.  For pure invalidates,
 * the bytes of the first and last cache lines lying outside the caller's
 * range are saved before the operation and restored afterwards, so that
 * unrelated data sharing those lines is not lost.
 *
 * On MULTIPROCESSOR kernels the writeback runs on the local cpu only,
 * while invalidations are broadcast to all other live cpus.
 *
 * NOTE(review): the bcopy() save/restore accesses the range through its
 * physical address — presumably this code runs with a 1:1 mapping of the
 * affected memory; confirm against the pmap setup.
 */
void m8820x_dma_cachectl(paddr_t _pa, psize_t _size, int op)
{
	u_int32_t psr;
	int cpu;
#ifdef MULTIPROCESSOR
	struct cpu_info *ci = curcpu();
#endif
	paddr_t pa;
	psize_t size, count;
	void (*flusher)(int, paddr_t, psize_t);
	/* Holds the out-of-range bytes of the first and last cache lines. */
	uint8_t lines[2 * MC88200_CACHE_LINE];
	paddr_t pa1, pa2;
	psize_t sz1, sz2;

	/* Extend the range to cache line boundaries. */
	pa = trunc_cache_line(_pa);
	size = round_cache_line(_pa + _size) - pa;
	sz1 = sz2 = 0;

	switch (op) {
	case DMA_CACHE_SYNC:
		flusher = m8820x_cmmu_wb_locked;
		break;
	case DMA_CACHE_SYNC_INVAL:
		flusher = m8820x_cmmu_wbinv_locked;
		break;
	default:
	case DMA_CACHE_INV:
		/*
		 * Remember the partial-line head (pa1/sz1) and tail
		 * (pa2/sz2) spans which must survive the invalidation.
		 */
		pa1 = pa;
		sz1 = _pa - pa1;
		pa2 = _pa + _size;
		sz2 = pa + size - pa2;
		flusher = m8820x_cmmu_inv_locked;
		break;
	}

#ifndef MULTIPROCESSOR
	cpu = cpu_number();
#endif

	/* Run the whole sequence with interrupts disabled. */
	psr = get_psr();
	set_psr(psr | PSR_IND);
	CMMU_LOCK;

	/*
	 * Preserve the data from incomplete cache lines about to be
	 * invalidated, if necessary.
	 */
	if (sz1 != 0)
		bcopy((void *)pa1, lines, sz1);
	if (sz2 != 0)
		bcopy((void *)pa2, lines + MC88200_CACHE_LINE, sz2);

	while (size != 0) {
		/* Use whole-page operations whenever alignment allows. */
		count = (pa & PAGE_MASK) == 0 && size >= PAGE_SIZE ?
		    PAGE_SIZE : MC88200_CACHE_LINE;

#ifdef MULTIPROCESSOR
		/* writeback on a single cpu... */
		(*flusher)(ci->ci_cpuid, pa, count);

		/* invalidate on all... */
		if (flusher != m8820x_cmmu_wb_locked) {
			for (cpu = 0; cpu < MAX_CPUS; cpu++) {
				if (!ISSET(m88k_cpus[cpu].ci_flags, CIF_ALIVE))
					continue;
				if (cpu == ci->ci_cpuid)
					continue;
				m8820x_cmmu_inv_locked(cpu, pa, count);
			}
		}
#else	/* MULTIPROCESSOR */
		(*flusher)(cpu, pa, count);
#endif	/* MULTIPROCESSOR */

		pa += count;
		size -= count;
	}

	/*
	 * Restore data from incomplete cache lines having been invalidated,
	 * if necessary.
	 */
	if (sz1 != 0)
		bcopy(lines, (void *)pa1, sz1);
	if (sz2 != 0)
		bcopy(lines + MC88200_CACHE_LINE, (void *)pa2, sz2);

	CMMU_UNLOCK;
	set_psr(psr);
}
/*
 * Guest helper: return the value of the Processor State Register
 * as computed by get_psr().
 */
target_ulong helper_rdpsr(void)
{
    target_ulong psr_value;

    psr_value = get_psr();
    return psr_value;
}