static inline int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
        int err = 0;

        /* If this task currently owns the FPU, flush its live state into
         * current->thread before copying it out to the signal frame. */
#ifdef CONFIG_SMP
        if (test_tsk_thread_flag(current, TIF_USEDFPU)) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
                regs->psr &= ~(PSR_EF);
                clear_tsk_thread_flag(current, TIF_USEDFPU);
        }
#else
        if (current == last_task_used_math) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
                last_task_used_math = NULL;
                regs->psr &= ~(PSR_EF);
        }
#endif
        /* Copy the in-kernel FPU image out to the user signal frame. */
        err |= __copy_to_user(&fpu->si_float_regs[0],
                              &current->thread.float_regs[0],
                              (sizeof(unsigned long) * 32));
        err |= __put_user(current->thread.fsr, &fpu->si_fsr);
        err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
        if (current->thread.fpqdepth != 0)
                err |= __copy_to_user(&fpu->si_fpqueue[0],
                                      &current->thread.fpqueue[0],
                                      ((sizeof(unsigned long) +
                                        (sizeof(unsigned long *))) * 16));
        clear_used_math();
        return err;
}
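/*
 * For contrast, a minimal sketch of the inverse operation: pulling the FPU
 * image back out of the user signal frame. This is only a sketch assuming
 * the same __siginfo_fpu_t layout as above; it is not the kernel's actual
 * restore_fpu_state() implementation.
 */
static inline int restore_fpu_state_sketch(struct pt_regs *regs,
                                           __siginfo_fpu_t __user *fpu)
{
        int err = 0;

        /* Mirror of the __copy_to_user()/__put_user() calls above. */
        err |= __copy_from_user(&current->thread.float_regs[0],
                                &fpu->si_float_regs[0],
                                sizeof(unsigned long) * 32);
        err |= __get_user(current->thread.fsr, &fpu->si_fsr);
        err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
        if (current->thread.fpqdepth != 0)
                err |= __copy_from_user(&current->thread.fpqueue[0],
                                        &fpu->si_fpqueue[0],
                                        (sizeof(unsigned long) +
                                         sizeof(unsigned long *)) * 16);
        return err;
}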
void flush_thread(void)
{
        /* Make sure old user windows don't get in the way. */
        flush_user_windows();
        current->tss.w_saved = 0;
        current->tss.uwinmask = 0;
        current->tss.sig_address = 0;
        current->tss.sig_desc = 0;
        current->tss.sstk_info.cur_status = 0;
        current->tss.sstk_info.the_stack = 0;

        if (last_task_used_math == current) {
                /* Clean the fpu. */
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->tss.float_regs[0], &current->tss.fsr,
                       &current->tss.fpqueue[0], &current->tss.fpqdepth);
                last_task_used_math = NULL;
        }

        memset(&current->tss.reg_window[0], 0,
               (sizeof(struct dummy_reg_window) * NSWINS));
        memset(&current->tss.rwbuf_stkptrs[0], 0,
               (sizeof(unsigned long) * NSWINS));

        /* Now, this task is no longer a kernel thread. */
        current->tss.flags &= ~SPARC_FLAG_KTHREAD;
}
void cpu_put_psr(CPUState *env1, target_ulong val)
{
        CPUState *saved_env;

        saved_env = env;
        env = env1;
        put_psr(val);
        env = saved_env;
}
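/*
 * For symmetry, a minimal sketch of the matching read accessor. It assumes a
 * get_psr() helper that reads the PSR out of the global `env`, the same way
 * put_psr() above writes it; the function name and that helper are
 * assumptions, not necessarily QEMU's actual API.
 */
target_ulong cpu_get_psr_sketch(CPUState *env1)
{
        CPUState *saved_env;
        target_ulong psr;

        saved_env = env;        /* remember the caller's current CPU */
        env = env1;             /* retarget helpers at the CPU of interest */
        psr = get_psr();        /* assumed helper reading PSR from `env` */
        env = saved_env;        /* restore the previous global mapping */
        return psr;
}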
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        //flush_user_windows();
        //printk("exit_thread %i\n", current->pid);
        if (last_task_used_math == current) {
                /* Keep process from leaving FPU in a bogon state. */
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->tss.float_regs[0], &current->tss.fsr,
                       &current->tss.fpqueue[0], &current->tss.fpqdepth);
                last_task_used_math = NULL;
        }
}
void do_fpd_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                 unsigned long psr)
{
        /* Sanity check... */
        if (psr & PSR_PS)
                die_if_kernel("Kernel gets FloatingPenguinUnit disabled trap", regs);

        put_psr(get_psr() | PSR_EF);    /* Allow FPU ops. */
        regs->psr |= PSR_EF;
#ifndef CONFIG_SMP
        if (last_task_used_math == current)
                return;
        if (last_task_used_math) {
                /* Other processes fpu state, save away */
                struct task_struct *fptask = last_task_used_math;
                fpsave(&fptask->thread.float_regs[0], &fptask->thread.fsr,
                       &fptask->thread.fpqueue[0], &fptask->thread.fpqdepth);
        }
        last_task_used_math = current;
        if (current->used_math) {
                fpload(&current->thread.float_regs[0], &current->thread.fsr);
        } else {
                /* Set initial sane state. */
                fpload(&init_fregs[0], &init_fsr);
                current->used_math = 1;
        }
#else
        if (!current->used_math) {
                fpload(&init_fregs[0], &init_fsr);
                current->used_math = 1;
        } else {
                fpload(&current->thread.float_regs[0], &current->thread.fsr);
        }
        current->flags |= PF_USEDFPU;
#endif
}
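/*
 * A self-contained toy model (plain user-space C, all names invented) of the
 * lazy-FPU ownership scheme do_fpd_trap() implements on UP: the FPU stays
 * disabled for every task, the first FP instruction traps, and the handler
 * saves the old owner's state, loads the new owner's, and records ownership.
 */
#include <stdio.h>
#include <string.h>

struct toy_task {
        const char *name;
        double fregs[32];       /* stand-in for float_regs[] */
        int used_math;          /* has this task ever touched the FPU? */
};

static struct toy_task *last_task_used_math;    /* current FPU owner */
static double hw_fregs[32];                     /* stand-in for the real FPU */

static void toy_fpd_trap(struct toy_task *current)
{
        if (last_task_used_math == current)
                return;                         /* already the owner */
        if (last_task_used_math)                /* save the old owner's state */
                memcpy(last_task_used_math->fregs, hw_fregs, sizeof(hw_fregs));
        last_task_used_math = current;
        if (current->used_math) {               /* reload this task's state */
                memcpy(hw_fregs, current->fregs, sizeof(hw_fregs));
        } else {                                /* or start from a sane state */
                memset(hw_fregs, 0, sizeof(hw_fregs));
                current->used_math = 1;
        }
}

int main(void)
{
        struct toy_task a = { .name = "a" }, b = { .name = "b" };

        toy_fpd_trap(&a);
        hw_fregs[0] = 1.5;              /* task a computes something */
        toy_fpd_trap(&b);               /* switch: a's state is saved lazily */
        printf("a.fregs[0] = %g\n", a.fregs[0]);        /* prints 1.5 */
        return 0;
}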
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
#ifndef CONFIG_SMP
        if (last_task_used_math == current) {
#else
        if (current->flags & PF_USEDFPU) {
#endif
                /* Keep process from leaving FPU in a bogon state. */
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
                last_task_used_math = NULL;
#else
                current->flags &= ~PF_USEDFPU;
#endif
        }
}

void flush_thread(void)
{
        current->thread.w_saved = 0;

        /* No new signal delivery by default */
        current->thread.new_signal = 0;
#ifndef CONFIG_SMP
        if (last_task_used_math == current) {
#else
        if (current->flags & PF_USEDFPU) {
#endif
                /* Clean the fpu. */
                put_psr(get_psr() | PSR_EF);
                fpsave(&current->thread.float_regs[0], &current->thread.fsr,
                       &current->thread.fpqueue[0], &current->thread.fpqdepth);
#ifndef CONFIG_SMP
                last_task_used_math = NULL;
#else
                current->flags &= ~PF_USEDFPU;
#endif
        }

        /* Now, this task is no longer a kernel thread. */
        current->thread.current_ds = USER_DS;
        if (current->thread.flags & SPARC_FLAG_KTHREAD) {
                current->thread.flags &= ~SPARC_FLAG_KTHREAD;

                /* We must fixup kregs as well. */
                current->thread.kregs = (struct pt_regs *)
                        (((unsigned long)current) +
                         (TASK_UNION_SIZE - TRACEREG_SZ));
        }
}

static __inline__ void copy_regs(struct pt_regs *dst, struct pt_regs *src)
{
        __asm__ __volatile__("ldd\t[%1 + 0x00], %%g2\n\t"
                             "ldd\t[%1 + 0x08], %%g4\n\t"
                             "ldd\t[%1 + 0x10], %%o4\n\t"
                             "std\t%%g2, [%0 + 0x00]\n\t"
                             "std\t%%g4, [%0 + 0x08]\n\t"
                             "std\t%%o4, [%0 + 0x10]\n\t"
                             "ldd\t[%1 + 0x18], %%g2\n\t"
                             "ldd\t[%1 + 0x20], %%g4\n\t"
                             "ldd\t[%1 + 0x28], %%o4\n\t"
                             "std\t%%g2, [%0 + 0x18]\n\t"
                             "std\t%%g4, [%0 + 0x20]\n\t"
                             "std\t%%o4, [%0 + 0x28]\n\t"
                             "ldd\t[%1 + 0x30], %%g2\n\t"
                             "ldd\t[%1 + 0x38], %%g4\n\t"
                             "ldd\t[%1 + 0x40], %%o4\n\t"
                             "std\t%%g2, [%0 + 0x30]\n\t"
                             "std\t%%g4, [%0 + 0x38]\n\t"
                             "ldd\t[%1 + 0x48], %%g2\n\t"
                             "std\t%%o4, [%0 + 0x40]\n\t"
                             "std\t%%g2, [%0 + 0x48]\n\t" : :
                             "r" (dst), "r" (src) :
                             "g2", "g3", "g4", "g5", "o4", "o5");
}
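/*
 * A plain-C sketch of what copy_regs() above achieves: the ldd/std pairs move
 * ten doublewords, i.e. the leading 0x50 bytes of the pt_regs, from src to
 * dst. A memcpy() equivalent, assuming only that those 0x50 bytes are the
 * whole region the caller needs copied.
 */
static __inline__ void copy_regs_c(struct pt_regs *dst, struct pt_regs *src)
{
        /* 10 doublewords at offsets 0x00..0x48 = 0x50 bytes total */
        memcpy(dst, src, 0x50);
}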
void copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                 struct task_struct *p, struct pt_regs *regs)
{
        struct pt_regs *childregs;
        struct sparc_stackf *old_stack, *new_stack;
        unsigned long stack_offset;

        //flush_user_windows();
        //printk("copy_thread\n");
        //show_regs(regs);
        if (last_task_used_math == current) {
                put_psr(get_psr() | PSR_EF);
                fpsave(&p->tss.float_regs[0], &p->tss.fsr,
                       &p->tss.fpqueue[0], &p->tss.fpqdepth);
        }

        /* Calculate offset to stack_frame & pt_regs */
        stack_offset = ((PAGE_SIZE) - TRACEREG_SZ);

        /*
         *  p->kernel_stack_page   new_stack   childregs
         *  !                      !           !             {if(PSR_PS) }
         *  V                      V (stk.fr.) V (pt_regs)   { (stk.fr.) }
         *  +----- - - - - - ------+===========+============={+==========}+
         */
        if (regs->psr & PSR_PS)
                stack_offset -= REGWIN_SZ;
        childregs = ((struct pt_regs *)(p->kernel_stack_page + stack_offset));
        *childregs = *regs;
        new_stack = (((struct sparc_stackf *)childregs) - 1);
        old_stack = (((struct sparc_stackf *)regs) - 1);
        *new_stack = *old_stack;
        p->tss.ksp = p->saved_kernel_stack = (unsigned long)new_stack;
        p->tss.kpc = (((unsigned long)ret_sys_call) - 0x8);
        p->tss.kpsr = current->tss.fork_kpsr;
        p->tss.kwim = current->tss.fork_kwim;
        p->tss.kregs = childregs;
        childregs->u_regs[UREG_FP] = sp;

        if (regs->psr & PSR_PS) {
                stack_offset += TRACEREG_SZ;
                childregs->u_regs[UREG_FP] = p->kernel_stack_page + stack_offset;
                p->tss.flags |= SPARC_FLAG_KTHREAD;
        } else {
                struct sparc_stackf *childstack;
                struct sparc_stackf *parentstack;

                p->tss.flags &= ~SPARC_FLAG_KTHREAD;
                childstack = (struct sparc_stackf *)(sp & ~0x7UL);
                parentstack = (struct sparc_stackf *)regs->u_regs[UREG_FP];
                if (childstack == parentstack) {
                        /* Adapt the copy depth for the case where the parent
                         * pushes more stack frames after fork(). */
                        childstack = clone_stackframe(childstack, parentstack,
                                                      3, 1024);
                } else {
                        childstack = clone_stackframe(childstack, parentstack,
                                                      3, 0);
                }
                childregs->u_regs[UREG_FP] = (unsigned long)childstack;
                /*
                printk("Parent stack\n");
                __show_backtrace(parentstack);
                printk("Child stack\n");
                __show_backtrace(childstack);
                */
        }

        /* Set the return value for the child. */
        childregs->u_regs[UREG_I0] = current->pid;
        childregs->u_regs[UREG_I1] = 1;

        /* Set the return value for the parent. */
        regs->u_regs[UREG_I1] = 0;
        /*
        printk("Parent: (%i)\n", current->pid);
        show_regs(regs);
        printk("Child: (%i)\n", p->pid);
        show_regs(childregs);
        */
}
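/*
 * A user-space toy (all names invented) of the return-value convention
 * copy_thread() sets up above: the kernel hands back a pid in %i0/%o0 and a
 * "am I the child?" flag in %i1/%o1, and the fork() wrapper folds the pair
 * into the familiar single return value.
 */
#include <stdio.h>

/* hypothetical raw register results as copy_thread() arranged them */
struct fork_raw { long o0; long o1; };

static long fork_result(struct fork_raw r)
{
        /* child sees o1 == 1 and returns 0; parent sees o1 == 0 and
         * returns the pid left in o0 */
        return r.o1 ? 0 : r.o0;
}

int main(void)
{
        struct fork_raw parent = { 1234, 0 }, child = { 1234, 1 };

        printf("parent gets %ld, child gets %ld\n",
               fork_result(parent), fork_result(child));
        return 0;
}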
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                 unsigned long psr)
{
        static int calls;
        siginfo_t info;
        unsigned long fsr;
        int ret = 0;
#ifndef CONFIG_SMP
        struct task_struct *fpt = last_task_used_math;
#else
        struct task_struct *fpt = current;
#endif
        put_psr(get_psr() | PSR_EF);

        /* If nobody owns the fpu right now, just clear the
         * error into our fake static buffer and hope it don't
         * happen again.  Thank you crashme...
         */
#ifndef CONFIG_SMP
        if (!fpt) {
#else
        if (!(fpt->flags & PF_USEDFPU)) {
#endif
                fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
                regs->psr &= ~PSR_EF;
                return;
        }
        fpsave(&fpt->thread.float_regs[0], &fpt->thread.fsr,
               &fpt->thread.fpqueue[0], &fpt->thread.fpqdepth);
#ifdef DEBUG_FPU
        printk("Hmm, FP exception, fsr was %016lx\n", fpt->thread.fsr);
#endif

        /* switch on the contents of the ftt [floating point trap type] field */
        switch ((fpt->thread.fsr & 0x1c000)) {
#ifdef DEBUG_FPU
        case (1 << 14):
                printk("IEEE_754_exception\n");
                break;
#endif
        case (2 << 14):         /* unfinished_FPop (underflow & co) */
        case (3 << 14):         /* unimplemented_FPop (quad stuff, maybe sqrt) */
                ret = do_mathemu(regs, fpt);
                break;
#ifdef DEBUG_FPU
        case (4 << 14):
                printk("sequence_error (OS bug...)\n");
                break;
        case (5 << 14):
                printk("hardware_error (uhoh!)\n");
                break;
        case (6 << 14):
                printk("invalid_fp_register (user error)\n");
                break;
#endif /* DEBUG_FPU */
        }

        /* If we successfully emulated the FPop, we pretend the trap
         * never happened :-> */
        if (ret) {
                fpload(&current->thread.float_regs[0], &current->thread.fsr);
                return;
        }
        /* nope, better SIGFPE the offending process... */
#ifdef CONFIG_SMP
        fpt->flags &= ~PF_USEDFPU;
#endif
        if (psr & PSR_PS) {
                /* The first fsr store/load we tried trapped,
                 * the second one will not (we hope).
                 */
                printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n",
                       regs->pc);
                regs->pc = regs->npc;
                regs->npc += 4;
                calls++;
                if (calls > 2)
                        die_if_kernel("Too many Penguin-FPU traps from kernel mode",
                                      regs);
                return;
        }

        fsr = fpt->thread.fsr;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void *)pc;
        info.si_trapno = 0;
        info.si_code = __SI_FAULT;
        if ((fsr & 0x1c000) == (1 << 14)) {
                if (fsr & 0x10)
                        info.si_code = FPE_FLTINV;
                else if (fsr & 0x08)
                        info.si_code = FPE_FLTOVF;
                else if (fsr & 0x04)
                        info.si_code = FPE_FLTUND;
                else if (fsr & 0x02)
                        info.si_code = FPE_FLTDIV;
                else if (fsr & 0x01)
                        info.si_code = FPE_FLTRES;
        }
        send_sig_info(SIGFPE, &info, fpt);
#ifndef CONFIG_SMP
        last_task_used_math = NULL;
#endif
        regs->psr &= ~PSR_EF;
        if (calls > 0)
                calls = 0;
}

void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                         unsigned long psr)
{
        siginfo_t info;

        if (psr & PSR_PS)
                die_if_kernel("Penguin overflow trap from kernel mode", regs);
        info.si_signo = SIGEMT;
        info.si_errno = 0;
        info.si_code = EMT_TAGOVF;
        info.si_addr = (void *)pc;
        info.si_trapno = 0;
        send_sig_info(SIGEMT, &info, current);
}
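/*
 * For reference: the ftt ("floating-point trap type") values tested by the
 * switch above. Per the SPARC V8 manual, ftt occupies FSR bits 16:14, hence
 * the 0x1c000 mask and the (n << 14) case labels. The macro names below are
 * illustrative, not definitions taken from this kernel source.
 */
#define FSR_FTT_MASK            0x1c000
#define FSR_FTT_IEEE_754        (1 << 14)       /* IEEE exception -> SIGFPE path */
#define FSR_FTT_UNFINISHED      (2 << 14)       /* unfinished FPop -> do_mathemu() */
#define FSR_FTT_UNIMPLEMENTED   (3 << 14)       /* unimplemented FPop -> do_mathemu() */
#define FSR_FTT_SEQUENCE_ERROR  (4 << 14)       /* sequence error (OS bug) */
#define FSR_FTT_HARDWARE_ERROR  (5 << 14)       /* hardware error */
#define FSR_FTT_INVALID_REG     (6 << 14)       /* invalid FP register */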
void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                 unsigned long psr)
{
        static int calls = 0;
#ifndef __SMP__
        struct task_struct *fpt = last_task_used_math;
#else
        struct task_struct *fpt = current;
#endif
        put_psr(get_psr() | PSR_EF);

        /* If nobody owns the fpu right now, just clear the
         * error into our fake static buffer and hope it don't
         * happen again.  Thank you crashme...
         */
#ifndef __SMP__
        if (!fpt) {
#else
        if (!(fpt->flags & PF_USEDFPU)) {
#endif
                fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
                regs->psr &= ~PSR_EF;
                return;
        }
        fpsave(&fpt->tss.float_regs[0], &fpt->tss.fsr,
               &fpt->tss.fpqueue[0], &fpt->tss.fpqdepth);
        fpt->tss.sig_address = pc;
        fpt->tss.sig_desc = SUBSIG_FPERROR;     /* as good as any */
#ifdef __SMP__
        fpt->flags &= ~PF_USEDFPU;
#endif
        if (psr & PSR_PS) {
                /* The first fsr store/load we tried trapped,
                 * the second one will not (we hope).
                 */
                printk("WARNING: FPU exception from kernel mode. at pc=%08lx\n",
                       regs->pc);
                regs->pc = regs->npc;
                regs->npc += 4;
                calls++;
                if (calls > 2)
                        die_if_kernel("Too many Penguin-FPU traps from kernel mode",
                                      regs);
                return;
        }
        send_sig(SIGFPE, fpt, 1);
#ifndef __SMP__
        last_task_used_math = NULL;
#endif
        regs->psr &= ~PSR_EF;
        if (calls > 0)
                calls = 0;
}

void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                         unsigned long psr)
{
        if (psr & PSR_PS)
                die_if_kernel("Penguin overflow trap from kernel mode", regs);
        current->tss.sig_address = pc;
        current->tss.sig_desc = SUBSIG_TAG;     /* as good as any */
        send_sig(SIGEMT, current, 1);
}

void handle_watchpoint(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                       unsigned long psr)
{
#ifdef TRAP_DEBUG
        printk("Watchpoint detected at PC %08lx NPC %08lx PSR %08lx\n",
               pc, npc, psr);
#endif
        if (psr & PSR_PS)
                panic("Tell me what a watchpoint trap is, and I'll then deal "
                      "with such a beast...");
}