/*
 * Save the task's FPU state into the user-space __siginfo_fpu_t area
 * of a signal frame.  If this task currently owns the FPU, the live
 * registers are first flushed into current->thread (fpsave() needs
 * PSR_EF set to touch them), and the FPU is marked disabled for the
 * task by clearing PSR_EF in the saved registers.
 *
 * Fix: every "&current" in this block had been mojibake-mangled to
 * "¤t" (HTML entity damage); restored.
 *
 * Returns 0 on success, non-zero if any user-space copy faulted.
 */
static inline int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
	int err = 0;
#ifdef CONFIG_SMP
	if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
		/* Enable the FPU so fpsave() can store its registers. */
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		regs->psr &= ~(PSR_EF);
		clear_tsk_thread_flag(current, TIF_USEDFPU);
	}
#else
	if (current == last_task_used_math) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&current->thread.float_regs[0], &current->thread.fsr,
		       &current->thread.fpqueue[0], &current->thread.fpqdepth);
		last_task_used_math = NULL;
		regs->psr &= ~(PSR_EF);
	}
#endif
	/* Copy the (now up-to-date) in-kernel FPU image out to the frame. */
	err |= __copy_to_user(&fpu->si_float_regs[0],
			      &current->thread.float_regs[0],
			      (sizeof(unsigned long) * 32));
	err |= __put_user(current->thread.fsr, &fpu->si_fsr);
	err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
	/* Only copy the FP queue if there are pending entries. */
	if (current->thread.fpqdepth != 0)
		err |= __copy_to_user(&fpu->si_fpqueue[0],
				      &current->thread.fpqueue[0],
				      ((sizeof(unsigned long) +
					(sizeof(unsigned long *))) * 16));
	clear_used_math();
	return err;
}
/*
 * math coprocessor error
 *
 * Trap handler for an FPU error interrupt: acknowledge the external
 * 387 error latch if needed, capture the FPU state for diagnosis,
 * post a note to the offending process, and panic if the fault
 * originated in kernel text.
 */
static void
matherror(Ureg *ur, void*)
{
	ulong status, pc;

	/*
	 * a write cycle to port 0xF0 clears the interrupt latch attached
	 * to the error# line from the 387
	 */
	if(!(m->cpuiddx & Fpuonchip))
		outb(0xF0, 0xFF);

	/*
	 * save floating point state to check out error
	 */
	fpenv(&up->fpsave);	/* result ignored, but masks fp exceptions */
	fpsave(&up->fpsave);	/* also turns fpu off */
	fpon();
	mathnote();

	/* fault pc in kernel address space (KZERO): unrecoverable */
	if((ur->pc & 0xf0000000) == KZERO){
		mathstate(&status, &pc, nil);
		panic("fp: status %#lux fppc=%#lux pc=%#lux",
			status, pc, ur->pc);
	}
}
/*
 * Kernel bootstrap entry point.  Initialises each subsystem in strict
 * dependency order, then creates the first user process and enters the
 * scheduler; schedinit() does not return.
 * NOTE(review): the call order below is load-bearing — do not reorder.
 */
void
main(void)
{
	/* zero the BSS segment before anything reads a static variable */
	memset(edata, 0, (ulong)end-(ulong)edata);
	conf.nmach = 1;
	machinit();
	confinit();
	xinit();
	trapinit();
	mmuinit();
	plan9iniinit();
	hwintrinit();
	clockinit();
	timerinit();
	console();
	quotefmtinstall();
	printinit();
	cpuidprint();
	print("\nPlan 9 from Bell Labs\n");
	procinit0();
	initseg();
	timersinit();
	links();
	chandevreset();
	pageinit();
	swapinit();
	sharedseginit();
	/* capture a pristine FPU image to initialise new processes from */
	fpsave(&initfp);
	initfp.fpscr = 0;
	userinit();
	schedinit();
}
/*
 * Save the task's FPU state into the sigcontext of a signal frame
 * (SuperH).  sc_fpvalid records whether FP state was present at all;
 * if this task owns the FPU, the live registers are flushed into
 * current->thread.fpu first and the FPU is disabled for the task
 * (SR_FD set in the saved status register).
 *
 * Fix: "&current" had been mojibake-mangled to "¤t"; restored.
 *
 * Returns 0 on success, non-zero if any user-space store faulted.
 */
static inline int setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
	int err = 0;
	int fpvalid;

	fpvalid = !!used_math();
	err |= __put_user(fpvalid, &sc->sc_fpvalid);
	if (! fpvalid)
		return err;

	if (current == last_task_used_math) {
		grab_fpu();
		fpsave(&current->thread.fpu.hard);
		release_fpu();
		last_task_used_math = NULL;
		/* FPU now disabled for this task until next fault-in. */
		regs->sr |= SR_FD;
	}

	/* 32 double regs plus the fpscr word. */
	err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
			      (sizeof(long long) * 32) + (sizeof(int) * 1));
	clear_used_math();

	return err;
}
void flush_thread(void) { /* Make sure old user windows don't get in the way. */ flush_user_windows(); current->tss.w_saved = 0; current->tss.uwinmask = 0; current->tss.sig_address = 0; current->tss.sig_desc = 0; current->tss.sstk_info.cur_status = 0; current->tss.sstk_info.the_stack = 0; if(last_task_used_math == current) { /* Clean the fpu. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->tss.float_regs[0], ¤t->tss.fsr, ¤t->tss.fpqueue[0], ¤t->tss.fpqdepth); last_task_used_math = NULL; } memset(¤t->tss.reg_window[0], 0, (sizeof(struct dummy_reg_window) * NSWINS)); memset(¤t->tss.rwbuf_stkptrs[0], 0, (sizeof(unsigned long) * NSWINS)); /* Now, this task is no longer a kernel thread. */ current->tss.flags &= ~SPARC_FLAG_KTHREAD; }
/*
 * Save the mach dependent part of the process state.
 */
void
procsave(Proc *p)
{
	if(p->fpstate != FPactive)
		return;
	/* no point saving FP registers for a process that is dying */
	if(p->state != Moribund)
		fpsave(&up->fpsave);
	p->fpstate = FPinactive;
}
/*
 * Save the mach dependent part of the process state.
 * Also charges the cycle counter to the process being switched out.
 */
void
procsave(Proc *p)
{
	uvlong c;

	cycles(&c);
	p->pcycles += c;

	if(p->fpstate != FPactive)
		return;
	/* no point saving FP registers for a process that is dying */
	if(p->state != Moribund)
		fpsave(&up->fpsave);
	p->fpstate = FPinactive;
}
/* * Free current thread data structures etc.. */ void exit_thread(void) { //flush_user_windows(); //printk("exit_thread %i\n",current->pid); if(last_task_used_math == current) { /* Keep process from leaving FPU in a bogon state. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->tss.float_regs[0], ¤t->tss.fsr, ¤t->tss.fpqueue[0], ¤t->tss.fpqdepth); last_task_used_math = NULL; } }
/*
 * Save the mach dependent part of the process state.
 */
void
procsave(Proc *p)
{
	uvlong t;

	/* account the cycles spent: close the kernel-entry window */
	cycles(&t);
	p->kentry -= t;
	p->pcycles += t;

	if(p->fpstate == FPactive){
		if(p->state == Moribund)
			fpclear();	/* dying: just reset the FPU */
		else{
			/*
			 * Fpsave() stores without handling pending
			 * unmasked exeptions. Postnote() can't be called
			 * here as sleep() already has up->rlock, so
			 * the handling of pending exceptions is delayed
			 * until the process runs again and generates an
			 * emulation fault to activate the FPU.
			 */
			fpsave(&p->fpsave);
		}
		p->fpstate = FPinactive;
	}

	/*
	 * While this processor is in the scheduler, the process could run
	 * on another processor and exit, returning the page tables to
	 * the free list where they could be reallocated and overwritten.
	 * When this processor eventually has to get an entry from the
	 * trashed page tables it will crash.
	 *
	 * If there's only one processor, this can't happen.
	 * You might think it would be a win not to do this in that case,
	 * especially on VMware, but it turns out not to matter.
	 */
	mmuflushtlb(PADDR(m->pdb));
}
void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { /* Sanity check... */ if(psr & PSR_PS) die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs); put_psr(get_psr() | PSR_EF); /* Allow FPU ops. */ regs->psr |= PSR_EF; #ifndef CONFIG_SMP if(last_task_used_math == current) return; if(last_task_used_math) { /* Other processes fpu state, save away */ struct task_struct *fptask = last_task_used_math; fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr, &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth); } last_task_used_math = current; if(current->used_math) { fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); } else { /* Set initial sane state. */ fpload(&init_fregs[0], &init_fsr); current->used_math = 1; } #else if(!current->used_math) { fpload(&init_fregs[0], &init_fsr); current->used_math = 1; } else { fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); } current->flags |= PF_USEDFPU; #endif }
/* * Free current thread data structures etc.. */ void exit_thread(void) { #ifndef CONFIG_SMP if(last_task_used_math == current) { #else if(current->flags & PF_USEDFPU) { #endif /* Keep process from leaving FPU in a bogon state. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); #ifndef CONFIG_SMP last_task_used_math = NULL; #else current->flags &= ~PF_USEDFPU; #endif } } void flush_thread(void) { current->thread.w_saved = 0; /* No new signal delivery by default */ current->thread.new_signal = 0; #ifndef CONFIG_SMP if(last_task_used_math == current) { #else if(current->flags & PF_USEDFPU) { #endif /* Clean the fpu. */ put_psr(get_psr() | PSR_EF); fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); #ifndef CONFIG_SMP last_task_used_math = NULL; #else current->flags &= ~PF_USEDFPU; #endif } /* Now, this task is no longer a kernel thread. */ current->thread.current_ds = USER_DS; if (current->thread.flags & SPARC_FLAG_KTHREAD) { current->thread.flags &= ~SPARC_FLAG_KTHREAD; /* We must fixup kregs as well. */ current->thread.kregs = (struct pt_regs *) (((unsigned long)current) + (TASK_UNION_SIZE - TRACEREG_SZ)); } } static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src) { __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t" "ldd\t[%1 + 0x08], %%g4\n\t" "ldd\t[%1 + 0x10], %%o4\n\t" "std\t%%g2, [%0 + 0x00]\n\t" "std\t%%g4, [%0 + 0x08]\n\t" "std\t%%o4, [%0 + 0x10]\n\t" "ldd\t[%1 + 0x18], %%g2\n\t" "ldd\t[%1 + 0x20], %%g4\n\t" "ldd\t[%1 + 0x28], %%o4\n\t" "std\t%%g2, [%0 + 0x18]\n\t" "std\t%%g4, [%0 + 0x20]\n\t" "std\t%%o4, [%0 + 0x28]\n\t" "ldd\t[%1 + 0x30], %%g2\n\t" "ldd\t[%1 + 0x38], %%g4\n\t" "ldd\t[%1 + 0x40], %%o4\n\t" "std\t%%g2, [%0 + 0x30]\n\t" "std\t%%g4, [%0 + 0x38]\n\t" "ldd\t[%1 + 0x48], %%g2\n\t" "std\t%%o4, [%0 + 0x40]\n\t" "std\t%%g2, [%0 + 0x48]\n\t" : : "r" (dst), "r" (src) : "g2", "g3", "g4", "g5", "o4", "o5"); }
/*
 * Call user, if necessary, with note.
 * Pass user the Ureg struct and the note on his stack.
 */
int
notify(Ureg* ureg)
{
	int l;
	ulong s, sp;
	Note *n;

	if(up->procctl)
		procctl(up);
	if(up->nnote == 0)
		return 0;

	/* flush FP state before building the user signal frame */
	if(up->fpstate == FPactive){
		fpsave(&up->fpsave);
		up->fpstate = FPinactive;
	}
	up->fpstate |= FPillegal;	/* FP use is illegal in a note handler */

	s = spllo();
	qlock(&up->debug);
	up->notepending = 0;
	n = &up->note[0];
	/* append the faulting pc to system notes, truncating if needed */
	if(strncmp(n->msg, "sys:", 4) == 0){
		l = strlen(n->msg);
		if(l > ERRMAX-15)	/* " pc=0x12345678\0" */
			l = ERRMAX-15;
		sprint(n->msg+l, " pc=0x%.8lux", ureg->pc);
	}

	/* unhandleable non-user note: the process dies */
	if(n->flag!=NUser && (up->notified || up->notify==0)){
		if(n->flag == NDebug)
			pprint("suicide: %s\n", n->msg);
		qunlock(&up->debug);
		pexit(n->msg, n->flag!=NDebug);
	}

	if(up->notified){
		qunlock(&up->debug);
		splhi();
		return 0;
	}

	if(!up->notify){
		qunlock(&up->debug);
		pexit(n->msg, n->flag!=NDebug);
	}

	sp = ureg->usp;
	sp -= 256;	/* debugging: preserve context causing problem */
	sp -= sizeof(Ureg);

	if(0) print("%s %lud: notify %.8lux %.8lux %.8lux %s\n",
		up->text, up->pid, ureg->pc, ureg->usp, sp, n->msg);

	/* verify the handler address and the whole frame we will write */
	if(!okaddr((ulong)up->notify, 1, 0)
	|| !okaddr(sp-ERRMAX-4*BY2WD, sizeof(Ureg)+ERRMAX+4*BY2WD, 1)){
		pprint("suicide: bad address in notify\n");
		qunlock(&up->debug);
		pexit("Suicide", 0);
	}

	/* frame layout (high to low): Ureg, old up->ureg, note msg, 3 args */
	memmove((Ureg*)sp, ureg, sizeof(Ureg));
	*(Ureg**)(sp-BY2WD) = up->ureg;	/* word under Ureg is old up->ureg */
	up->ureg = (void*)sp;
	sp -= BY2WD+ERRMAX;
	memmove((char*)sp, up->note[0].msg, ERRMAX);
	sp -= 3*BY2WD;
	*(ulong*)(sp+2*BY2WD) = sp+3*BY2WD;	/* arg 2 is string */
	*(ulong*)(sp+1*BY2WD) = (ulong)up->ureg;	/* arg 1 is ureg* */
	*(ulong*)(sp+0*BY2WD) = 0;		/* arg 0 is pc */
	ureg->usp = sp;
	ureg->pc = (ulong)up->notify;
	up->notified = 1;
	up->nnote--;
	memmove(&up->lastnote, &up->note[0], sizeof(Note));
	memmove(&up->note[0], &up->note[1], up->nnote*sizeof(Note));

	qunlock(&up->debug);
	splx(s);
	return 1;
}
/*
 * Syscall is called directly from assembler without going through trap().
 *
 * Dispatches the system call named in ureg->ax through systab[],
 * copying arguments from the user stack, handling error recovery via
 * waserror()/poperror(), and delivering any pending notes on the way
 * out.
 */
void
syscall(Ureg* ureg)
{
	char *e;
	ulong	sp;
	long	ret;
	int	i, s;
	ulong scallnr;

	/* must enter from user mode */
	if((ureg->cs & 0xFFFF) != UESEL)
		panic("syscall: cs 0x%4.4luX", ureg->cs);

	cycles(&up->kentry);

	m->syscall++;
	up->insyscall = 1;
	up->pc = ureg->pc;
	up->dbgreg = ureg;

	if(up->procctl == Proc_tracesyscall){
		up->procctl = Proc_stopme;
		procctl(up);
	}

	scallnr = ureg->ax;
	up->scallnr = scallnr;
	/* rfork must see a consistent saved FP image for the child */
	if(scallnr == RFORK && up->fpstate == FPactive){
		fpsave(&up->fpsave);
		up->fpstate = FPinactive;
	}
	spllo();

	sp = ureg->usp;
	up->nerrlab = 0;
	ret = -1;
	if(!waserror()){
		if(scallnr >= nsyscall || systab[scallnr] == 0){
			pprint("bad sys call number %lud pc %lux\n",
				scallnr, ureg->pc);
			postnote(up, 1, "sys: bad sys call", NDebug);
			error(Ebadarg);
		}

		/* validate argument block unless it is in the top stack page */
		if(sp<(USTKTOP-BY2PG) || sp>(USTKTOP-sizeof(Sargs)-BY2WD))
			validaddr(sp, sizeof(Sargs)+BY2WD, 0);

		up->s = *((Sargs*)(sp+BY2WD));
		up->psstate = sysctab[scallnr];

		ret = systab[scallnr](up->s.args);
		poperror();
	}else{
		/* failure: save the error buffer for errstr */
		e = up->syserrstr;
		up->syserrstr = up->errstr;
		up->errstr = e;
		if(0 && up->pid == 1)
			print("syscall %lud error %s\n",
				scallnr, up->syserrstr);
	}
	/* a non-empty error stack here means a missing poperror() somewhere */
	if(up->nerrlab){
		print("bad errstack [%lud]: %d extra\n", scallnr, up->nerrlab);
		for(i = 0; i < NERR; i++)
			print("sp=%lux pc=%lux\n",
				up->errlab[i].sp, up->errlab[i].pc);
		panic("error stack");
	}

	/*
	 *  Put return value in frame.  On the x86 the syscall is
	 *  just another trap and the return value from syscall is
	 *  ignored.  On other machines the return value is put into
	 *  the results register by caller of syscall.
	 */
	ureg->ax = ret;

	if(up->procctl == Proc_tracesyscall){
		up->procctl = Proc_stopme;
		s = splhi();
		procctl(up);
		splx(s);
	}

	up->insyscall = 0;
	up->psstate = 0;

	/* noted() must run with the note arguments still on the user stack */
	if(scallnr == NOTED)
		noted(ureg, *(ulong*)(sp+BY2WD));

	if(scallnr!=RFORK && (up->procctl || up->nnote)){
		splhi();
		notify(ureg);
	}
	/* if we delayed sched because we held a lock, sched now */
	if(up->delaysched)
		sched();
	kexit(ureg);
}
/*
 * Set up the machine-dependent thread state of a newly forked task:
 * build the child's kernel stack (pt_regs plus a stack frame), copy or
 * clone the parent's user stack frame, and set the fork return values
 * (child sees pid/1, parent sees 0 in UREG_I1).
 *
 * NOTE(review): fpsave() here stores the live FPU state directly into
 * the *child's* tss (p->tss), not current's — presumably so the child
 * inherits a copy; confirm against the surrounding port's conventions.
 */
void copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		 struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct sparc_stackf *old_stack, *new_stack;
	unsigned long stack_offset;

	//flush_user_windows();
	//printk ("copy_thread\n");
	//show_regs(regs);
	if(last_task_used_math == current) {
		put_psr(get_psr() | PSR_EF);
		fpsave(&p->tss.float_regs[0], &p->tss.fsr,
		       &p->tss.fpqueue[0], &p->tss.fpqdepth);
	}

	/* Calculate offset to stack_frame & pt_regs */
	stack_offset = ((PAGE_SIZE ) - TRACEREG_SZ);

	/*
	 *  p->kernel_stack_page   new_stack   childregs
	 *  !                      !           !             {if(PSR_PS) }
	 *  V                      V (stk.fr.) V  (pt_regs)  { (stk.fr.) }
	 *  +----- - - - - - ------+===========+============={+==========}+
	 */
	if(regs->psr & PSR_PS)
		stack_offset -= REGWIN_SZ;
	childregs = ((struct pt_regs *) (p->kernel_stack_page + stack_offset));
	*childregs = *regs;
	new_stack = (((struct sparc_stackf *) childregs) - 1);
	old_stack = (((struct sparc_stackf *) regs) - 1);
	*new_stack = *old_stack;
	p->tss.ksp = p->saved_kernel_stack = (unsigned long) new_stack;
	/* child resumes just before ret_sys_call (delay-slot adjusted) */
	p->tss.kpc = (((unsigned long) ret_sys_call) - 0x8);
	p->tss.kpsr = current->tss.fork_kpsr;
	p->tss.kwim = current->tss.fork_kwim;
	p->tss.kregs = childregs;
	childregs->u_regs[UREG_FP] = sp;

	if(regs->psr & PSR_PS) {
		/* kernel thread: its "user" stack lives on the kernel page */
		stack_offset += TRACEREG_SZ;
		childregs->u_regs[UREG_FP] = p->kernel_stack_page + stack_offset;
		p->tss.flags |= SPARC_FLAG_KTHREAD;
	} else {
		struct sparc_stackf *childstack;
		struct sparc_stackf *parentstack;

		p->tss.flags &= ~SPARC_FLAG_KTHREAD;
		childstack = (struct sparc_stackf *) (sp & ~0x7UL);
		parentstack = (struct sparc_stackf *) regs->u_regs[UREG_FP];

		if (childstack == parentstack) {
			//adapt the copy depth when after fork() parent pushes more stack frames.
			childstack = clone_stackframe(childstack, parentstack,3,1024);
		} else {
			childstack = clone_stackframe(childstack, parentstack,3,0);
		}

		childregs->u_regs[UREG_FP] = (unsigned long)childstack;

		/*
		printk("Parent stack\n");
		__show_backtrace(parentstack);
		printk("Child stack\n");
		__show_backtrace(childstack);
		*/
	}

	/* Set the return value for the child. */
	childregs->u_regs[UREG_I0] = current->pid;
	childregs->u_regs[UREG_I1] = 1;

	/* Set the return value for the parent. */
	regs->u_regs[UREG_I1] = 0;

	/*
	printk("Parent: (%i)\n",current->pid);
	show_regs(regs);
	printk("Child: (%i)\n",p->pid);
	show_regs(childregs);
	*/
}
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { static int calls; siginfo_t info; unsigned long fsr; int ret = 0; #ifndef CONFIG_SMP struct task_struct *fpt = last_task_used_math; #else struct task_struct *fpt = current; #endif put_psr(get_psr() | PSR_EF); /* If nobody owns the fpu right now, just clear the * error into our fake static buffer and hope it don't * happen again. Thank you crashme... */ #ifndef CONFIG_SMP if(!fpt) { #else if(!(fpt->flags & PF_USEDFPU)) { #endif fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); regs->psr &= ~PSR_EF; return; } fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr, &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth); #ifdef DEBUG_FPU printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr); #endif switch ((fpt->thread.fsr & 0x1c000)) { /* switch on the contents of the ftt [floating point trap type] field */ #ifdef DEBUG_FPU case (1 << 14): printk("IEEE_754_exception\n"); break; #endif case (2 << 14): /* unfinished_FPop (underflow & co) */ case (3 << 14): /* unimplemented_FPop (quad stuff, maybe sqrt) */ ret = do_mathemu(regs, fpt); break; #ifdef DEBUG_FPU case (4 << 14): printk("sequence_error (OS bug...)\n"); break; case (5 << 14): printk("hardware_error (uhoh!)\n"); break; case (6 << 14): printk("invalid_fp_register (user error)\n"); break; #endif /* DEBUG_FPU */ } /* If we successfully emulated the FPop, we pretend the trap never happened :-> */ if (ret) { fpload(¤t->thread.float_regs[0], ¤t->thread.fsr); return; } /* nope, better SIGFPE the offending process... */ #ifdef CONFIG_SMP fpt->flags &= ~PF_USEDFPU; #endif if(psr & PSR_PS) { /* The first fsr store/load we tried trapped, * the second one will not (we hope). */ printk("WARNING: FPU exception from kernel mode. 
at pc=%08lx\n", regs->pc); regs->pc = regs->npc; regs->npc += 4; calls++; if(calls > 2) die_if_kernel("Too many Penguin-FPU traps from kernel mode", regs); return; } fsr = fpt->thread.fsr; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void *)pc; info.si_trapno = 0; info.si_code = __SI_FAULT; if ((fsr & 0x1c000) == (1 << 14)) { if (fsr & 0x10) info.si_code = FPE_FLTINV; else if (fsr & 0x08) info.si_code = FPE_FLTOVF; else if (fsr & 0x04) info.si_code = FPE_FLTUND; else if (fsr & 0x02) info.si_code = FPE_FLTDIV; else if (fsr & 0x01) info.si_code = FPE_FLTRES; } send_sig_info(SIGFPE, &info, fpt); #ifndef CONFIG_SMP last_task_used_math = NULL; #endif regs->psr &= ~PSR_EF; if(calls > 0) calls=0; } void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { siginfo_t info; if(psr & PSR_PS) die_if_kernel("Penguin overflow trap from kernel mode", regs); info.si_signo = SIGEMT; info.si_errno = 0; info.si_code = EMT_TAGOVF; info.si_addr = (void *)pc; info.si_trapno = 0; send_sig_info(SIGEMT, &info, current); }
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { static calls = 0; #ifndef __SMP__ struct task_struct *fpt = last_task_used_math; #else struct task_struct *fpt = current; #endif put_psr(get_psr() | PSR_EF); /* If nobody owns the fpu right now, just clear the * error into our fake static buffer and hope it don't * happen again. Thank you crashme... */ #ifndef __SMP__ if(!fpt) { #else if(!(fpt->flags & PF_USEDFPU)) { #endif fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth); regs->psr &= ~PSR_EF; return; } fpsave(&fpt->tss.float_regs[0], &fpt->tss.fsr, &fpt->tss.fpqueue[0], &fpt->tss.fpqdepth); fpt->tss.sig_address = pc; fpt->tss.sig_desc = SUBSIG_FPERROR; /* as good as any */ #ifdef __SMP__ fpt->flags &= ~PF_USEDFPU; #endif if(psr & PSR_PS) { /* The first fsr store/load we tried trapped, * the second one will not (we hope). */ printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n", regs->pc); regs->pc = regs->npc; regs->npc += 4; calls++; if(calls > 2) die_if_kernel("Too many Penguin-FPU traps from kernel mode", regs); return; } send_sig(SIGFPE, fpt, 1); #ifndef __SMP__ last_task_used_math = NULL; #endif regs->psr &= ~PSR_EF; if(calls > 0) calls=0; } void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { if(psr & PSR_PS) die_if_kernel("Penguin overflow trap from kernel mode", regs); current->tss.sig_address = pc; current->tss.sig_desc = SUBSIG_TAG; /* as good as any */ send_sig(SIGEMT, current, 1); } void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { #ifdef TRAP_DEBUG printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n", pc, npc, psr); #endif if(psr & PSR_PS) panic("Tell me what a watchpoint trap is, and I'll then deal " "with such a beast..."); }