/*
 * Make the FPU usable by kernel code on this CPU by flushing any
 * live user FP state out of the registers first.
 */
void enable_kernel_fp(void)
{
#ifdef CONFIG_SMP
	/* On SMP the FP state of interest can only belong to current. */
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	/* On UP, whichever task last used the FPU owns the registers. */
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
/*
 * Build a sigcontext on the user stack for signal @signr.
 *
 * Flushes live FP state to the thread_struct, saves the MSR (with FP
 * and FP-exception bits stripped) for restoration at sigreturn time,
 * and copies GP/FP register state plus the old signal mask out to
 * user space.  Returns non-zero if any user copy faulted.
 *
 * Fix: the source was corrupted by an HTML entity — "¤t" restored
 * to "&current" in the FP register copy.
 */
static int setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
			    int signr, sigset_t *set, unsigned long handler)
{
	int err = 0;

	/* Make sure thread.fpr holds the task's current FP registers. */
	if (regs->msr & MSR_FP)
		giveup_fpu(current);

	/* Remember the MSR with FP disabled; sigreturn restores from this. */
	current->thread.saved_msr = regs->msr & ~(MSR_FP | MSR_FE0 | MSR_FE1);
	regs->msr = current->thread.saved_msr | current->thread.fpexc_mode;
	current->thread.saved_softe = regs->softe;

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr,
			      FP_REGS_SIZE);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	/* The handler runs with FP disabled; first FP use will trap. */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
	current->thread.fpscr = 0;

	return err;
}
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task @tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (!tsk->thread.regs)
		return;

	/*
	 * Preemption must be off: if we were preempted between testing
	 * regs->msr and finishing the save, another task could take over
	 * the FPU and we would then store bogus values for the remaining
	 * FP registers when rescheduled.
	 */
	preempt_disable();
	if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
		/*
		 * Only current or a stopped child may be flushed here.
		 * On SMP the FP state is saved at context switch, so a
		 * stopped child still owning the FPU indicates a bug.
		 */
		BUG_ON(tsk != current);
#endif
		giveup_fpu(tsk);
	}
	preempt_enable();
}
/*
 * Copy @tsk's FP registers into @fpregs for a core dump.
 * Always reports success (returns 1).
 */
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	/* Flush any live FP state first; disable preemption so the
	 * MSR test and the flush are atomic with respect to scheduling. */
	preempt_disable();
	if (tsk->thread.regs != NULL && (tsk->thread.regs->msr & MSR_FP))
		giveup_fpu(tsk);
	preempt_enable();

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
	return 1;
}
/* * Save the current user registers on the user stack. * We only save the altivec registers if the process has used * altivec instructions at some point. */ static int save_user_regs(struct pt_regs *regs, struct mcontext *frame, int sigret) { /* save general and floating-point registers */ CHECK_FULL_REGS(regs); preempt_disable(); if (regs->msr & MSR_FP) giveup_fpu(current); #ifdef CONFIG_ALTIVEC if (current->thread.used_vr && (regs->msr & MSR_VEC)) giveup_altivec(current); #endif /* CONFIG_ALTIVEC */ preempt_enable(); if (__copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE) || __copy_to_user(&frame->mc_fregs, current->thread.fpr, ELF_NFPREG * sizeof(double))) return 1; current->thread.fpscr = 0; /* turn off all fp exceptions */ #ifdef CONFIG_ALTIVEC /* save altivec registers */ if (current->thread.used_vr) { if (__copy_to_user(&frame->mc_vregs, current->thread.vr, ELF_NVRREG * sizeof(vector128))) return 1; /* set MSR_VEC in the saved MSR value to indicate that frame->mc_vregs contains valid data */ if (__put_user(regs->msr | MSR_VEC, &frame->mc_gregs[PT_MSR])) return 1; } /* else assert((regs->msr & MSR_VEC) == 0) */ /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. Since VSCR only contains 32 bits saved in the least * significant bits of a vector, we "cheat" and stuff VRSAVE in the * most significant bits of that same vector. --BenH */ if (__put_user(current->thread.vrsave, (u32 *)&frame->mc_vregs[32])) return 1; #endif /* CONFIG_ALTIVEC */ if (sigret) { /* Set up the sigreturn trampoline: li r0,sigret; sc */ if (__put_user(0x38000000UL + sigret, &frame->tramp[0]) || __put_user(0x44000002UL, &frame->tramp[1])) return 1; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); } return 0; }
/*
 * Copy the current task's FP registers into @fpregs for a core dump.
 * Always reports success (returns 1).
 *
 * Fix: the source was corrupted by an HTML entity — "¤t" restored
 * to "&current" in the memcpy.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	/* Make sure tss.fpr holds the live FP register values. */
#ifdef __SMP__
	if (regs->msr & MSR_FP)
		smp_giveup_fpu(current);
#else
	if (last_task_used_math == current)
		giveup_fpu();
#endif
	memcpy(fpregs, &current->tss.fpr[0], sizeof(*fpregs));
	return 1;
}
/*
 * Copy @tsk's FP registers into @fpregs for a core dump.
 * Returns 0 if the task has no register frame, 1 otherwise.
 */
int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
{
	struct pt_regs *regs = tsk->thread.regs;

	if (regs == NULL)
		return 0;

	/* Only current can have live FP state in the CPU registers. */
	if (tsk == current && (regs->msr & MSR_FP))
		giveup_fpu(current);

	memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
	return 1;
}
/*
 * Write the current task's CPU state (pt_regs, FP registers, FPSCR,
 * optionally the VDSO base and AltiVec state) to @file as part of a
 * checkpoint.  Returns the number of bytes written, or a negative
 * error code on a short or failed write.
 *
 * Fix: the source was corrupted by an HTML entity — every "¤t"
 * restored to "&current".
 */
long vmadump_store_cpu(cr_chkpt_proc_req_t *ctx, struct file *file,
		       struct pt_regs *regs)
{
	long bytes = 0, r;

	/* Store struct pt_regs */
	r = write_kern(ctx, file, regs, sizeof(*regs));
	if (r != sizeof(*regs))
		goto err;
	bytes += r;

	/* Floating point regs: flush live state to the thread first. */
	if (regs->msr & MSR_FP)
		giveup_fpu(current);
	r = write_kern(ctx, file, &current->thread.fpr,
		       sizeof(current->thread.fpr));
	if (r != sizeof(current->thread.fpr))
		goto err;
	bytes += r;

	r = write_kern(ctx, file, &current->thread.fpscr,
		       sizeof(current->thread.fpscr));
	if (r != sizeof(current->thread.fpscr))
		goto err;
	bytes += r;

#if HAVE_THREAD_VDSO_BASE
	/* unconditionally store the base of the VDSO library */
	r = write_kern(ctx, file, &current->thread.vdso_base,
		       sizeof(current->thread.vdso_base));
	if (r != sizeof(current->thread.vdso_base))
		goto err;
	bytes += r;
#endif

#ifdef CONFIG_ALTIVEC
	/* XXX I really need to find out if this is right */
	if (regs->msr & MSR_VEC)
		giveup_altivec(current);
	r = write_kern(ctx, file, &current->thread.vr,
		       sizeof(current->thread.vr));
	if (r != sizeof(current->thread.vr))
		goto err;
	bytes += r;

	r = write_kern(ctx, file, &current->thread.vscr,
		       sizeof(current->thread.vscr));
	if (r != sizeof(current->thread.vscr))
		goto err;
	bytes += r;
#endif
	return bytes;

err:
	if (r >= 0)
		r = -EIO;	/* short write: map to I/O error */
	return r;
}
/*
 * Restore register state from a sigcontext at sigreturn time.
 * GP registers come from the user frame; the MSR is rebuilt from the
 * value saved at signal delivery so the handler cannot change
 * privileged bits (only FE0/FE1 survive via fpexc_mode).
 * Returns non-zero if any user copy faulted.
 *
 * Fix: the source was corrupted by an HTML entity — "¤t" restored
 * to "&current" in the FP register copy.
 */
static int restore_sigcontext(struct pt_regs *regs, sigset_t *set,
			      struct sigcontext *sc)
{
	unsigned int err = 0;

	/* Discard live FP state; it is about to be overwritten. */
	if (regs->msr & MSR_FP)
		giveup_fpu(current);

	err |= __copy_from_user(regs, &sc->gp_regs, GP_REGS_SIZE);
	err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs,
				FP_REGS_SIZE);
	current->thread.fpexc_mode = regs->msr & (MSR_FE0 | MSR_FE1);
	if (set != NULL)
		err |= __get_user(set->sig[0], &sc->oldmask);

	/* Don't allow the signal handler to change these modulo FE{0,1} */
	regs->msr = current->thread.saved_msr & ~(MSR_FP | MSR_FE0 | MSR_FE1);
	regs->softe = current->thread.saved_softe;

	return err;
}
static void parse_fpe(struct pt_regs *regs) { siginfo_t info; unsigned long fpscr; if (regs->msr & MSR_FP) giveup_fpu(current); fpscr = current->thread.fpscr; /* Invalid operation */ if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX)) info.si_code = FPE_FLTINV; /* Overflow */ else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX)) info.si_code = FPE_FLTOVF; /* Underflow */ else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX)) info.si_code = FPE_FLTUND; /* Divide by zero */ else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX)) info.si_code = FPE_FLTDIV; /* Inexact result */ else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX)) info.si_code = FPE_FLTRES; else info.si_code = 0; info.si_signo = SIGFPE; info.si_errno = 0; info.si_addr = (void *)regs->nip; _exception(SIGFPE, &info, regs); }
void AlignmentException(struct pt_regs *regs) { int fixed; #ifdef __SMP__ if (regs->msr & MSR_FP ) smp_giveup_fpu(current); #else if (last_task_used_math == current) giveup_fpu(); #endif fixed = fix_alignment(regs); if (fixed == 1) { regs->nip += 4; /* skip over emulated instruction */ return; } if (fixed == -EFAULT) { /* fixed == -EFAULT means the operand address was bad */ bad_page_fault(regs, regs->dar); return; } _exception(SIGBUS, regs); }
/* * Do a signal return; undo the signal stack. */ long sys_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, unsigned long r8, struct pt_regs *regs) { struct sigcontext_struct *sc, sigctx; struct sigregs *sr; long ret; elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */ sigset_t set; unsigned long prevsp; sc = (struct sigcontext_struct *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; set.sig[0] = sigctx.oldmask; #if _NSIG_WORDS > 1 set.sig[1] = sigctx._unused[3]; #endif sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(¤t->sigmask_lock); current->blocked = set; recalc_sigpending(current); spin_unlock_irq(¤t->sigmask_lock); sc++; /* Look at next sigcontext */ if (sc == (struct sigcontext_struct *)(sigctx.regs)) { /* Last stacked signal - restore registers */ sr = (struct sigregs *) sigctx.regs; if (regs->msr & MSR_FP ) giveup_fpu(current); if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs))) goto badframe; saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE) | (saved_regs[PT_MSR] & MSR_USERCHANGE); saved_regs[PT_SOFTE] = regs->softe; memcpy(regs, saved_regs, GP_REGS_SIZE); if (copy_from_user(current->thread.fpr, &sr->fp_regs, sizeof(sr->fp_regs))) goto badframe; ret = regs->result; } else { /* More signals to go */ regs->gpr[1] = (unsigned long)sc - __SIGNAL_FRAMESIZE; if (copy_from_user(&sigctx, sc, sizeof(sigctx))) goto badframe; sr = (struct sigregs *) sigctx.regs; regs->gpr[3] = ret = sigctx.signal; regs->gpr[4] = (unsigned long) sc; regs->link = (unsigned long) &sr->tramp; regs->nip = sigctx.handler; if (get_user(prevsp, &sr->gp_regs[PT_R1]) || put_user(prevsp, (unsigned long *) regs->gpr[1])) goto badframe; } return ret; badframe: do_exit(SIGSEGV); }
/*
 * Flush @tsk's VSX state to the thread_struct.  VSX registers
 * overlap the FP and VMX register files, so both are flushed
 * before the VSX-specific state.
 */
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}
int sys_ptrace(long request, long pid, long addr, long data) { struct task_struct *child; int ret = -EPERM; lock_kernel(); if (request == PTRACE_TRACEME) { /* are we already being traced? */ if (current->ptrace & PT_PTRACED) goto out; ret = security_ptrace(current->parent, current); if (ret) goto out; /* set the ptrace bit in the process flags. */ current->ptrace |= PT_PTRACED; ret = 0; goto out; } ret = -ESRCH; read_lock(&tasklist_lock); child = find_task_by_pid(pid); if (child) get_task_struct(child); read_unlock(&tasklist_lock); if (!child) goto out; ret = -EPERM; if (pid == 1) /* you may not mess with init */ goto out_tsk; if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; } ret = ptrace_check_attach(child, request == PTRACE_KILL); if (ret < 0) goto out_tsk; switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { unsigned long tmp; int copied; copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); ret = -EIO; if (copied != sizeof(tmp)) break; ret = put_user(tmp,(unsigned long __user *) data); break; } /* read the word at location addr in the USER area. */ /* XXX this will need fixing for 64-bit */ case PTRACE_PEEKUSR: { unsigned long index, tmp; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || index > PT_FPSCR || child->thread.regs == NULL) break; CHECK_FULL_REGS(child->thread.regs); if (index < PT_FPR0) { tmp = get_reg(child, (int) index); } else { preempt_disable(); if (child->thread.regs->msr & MSR_FP) giveup_fpu(child); preempt_enable(); tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; } ret = put_user(tmp,(unsigned long __user *) data); break; } /* If I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. 
*/ case PTRACE_POKEDATA: ret = 0; if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) break; ret = -EIO; break; /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 2; if ((addr & 3) || index > PT_FPSCR || child->thread.regs == NULL) break; CHECK_FULL_REGS(child->thread.regs); if (index == PT_ORIG_R3) break; if (index < PT_FPR0) { ret = put_reg(child, index, data); } else { preempt_disable(); if (child->thread.regs->msr & MSR_FP) giveup_fpu(child); preempt_enable(); ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; ret = 0; } break; } case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ ret = -EIO; if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) { set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); } else { clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); } child->exit_code = data; /* make sure the single step bit is not set. */ clear_single_step(child); wake_up_process(child); ret = 0; break; } /* * make the child exit. Best I can do is send it a sigkill. * perhaps it should be put in the status that it wants to * exit. */ case PTRACE_KILL: { ret = 0; if (child->exit_state == EXIT_ZOMBIE) /* already dead */ break; child->exit_code = SIGKILL; /* make sure the single step bit is not set. */ clear_single_step(child); wake_up_process(child); break; } case PTRACE_SINGLESTEP: { /* set the trap flag. */ ret = -EIO; if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); set_single_step(child); child->exit_code = data; /* give it a chance to run. */ wake_up_process(child); ret = 0; break; } case PTRACE_DETACH: ret = ptrace_detach(child, data); break; #ifdef CONFIG_ALTIVEC case PTRACE_GETVRREGS: /* Get the child altivec register state. 
*/ preempt_disable(); if (child->thread.regs->msr & MSR_VEC) giveup_altivec(child); preempt_enable(); ret = get_vrregs((unsigned long __user *)data, child); break; case PTRACE_SETVRREGS: /* Set the child altivec register state. */ /* this is to clear the MSR_VEC bit to force a reload * of register state from memory */ preempt_disable(); if (child->thread.regs->msr & MSR_VEC) giveup_altivec(child); preempt_enable(); ret = set_vrregs(child, (unsigned long __user *)data); break; #endif #ifdef CONFIG_SPE case PTRACE_GETEVRREGS: /* Get the child spe register state. */ if (child->thread.regs->msr & MSR_SPE) giveup_spe(child); ret = get_evrregs((unsigned long __user *)data, child); break; case PTRACE_SETEVRREGS: /* Set the child spe register state. */ /* this is to clear the MSR_SPE bit to force a reload * of register state from memory */ if (child->thread.regs->msr & MSR_SPE) giveup_spe(child); ret = set_evrregs(child, (unsigned long __user *)data); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } out_tsk: put_task_struct(child); out: unlock_kernel(); return ret; }
int sys_ptrace(long request, long pid, long addr, long data) { struct task_struct *child; int ret = -EPERM; lock_kernel(); if (request == PTRACE_TRACEME) { /* are we already being traced? */ if (current->ptrace & PT_PTRACED) goto out; ret = security_ptrace(current->parent, current); if (ret) goto out; /* set the ptrace bit in the process flags. */ current->ptrace |= PT_PTRACED; ret = 0; goto out; } ret = -ESRCH; read_lock(&tasklist_lock); child = find_task_by_pid(pid); if (child) get_task_struct(child); read_unlock(&tasklist_lock); if (!child) goto out; ret = -EPERM; if (pid == 1) /* you may not mess with init */ goto out_tsk; if (request == PTRACE_ATTACH) { ret = ptrace_attach(child); goto out_tsk; } ret = ptrace_check_attach(child, request == PTRACE_KILL); if (ret < 0) goto out_tsk; switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { unsigned long tmp; int copied; copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); ret = -EIO; if (copied != sizeof(tmp)) break; ret = put_user(tmp,(unsigned long __user *) data); break; } /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long index; unsigned long tmp; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) break; if (index < PT_FPR0) { tmp = get_reg(child, (int)index); } else { if (child->thread.regs->msr & MSR_FP) giveup_fpu(child); tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; } ret = put_user(tmp,(unsigned long __user *) data); break; } /* If I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. 
*/ case PTRACE_POKEDATA: ret = 0; if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) break; ret = -EIO; break; /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ index = (unsigned long) addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) break; if (index == PT_ORIG_R3) break; if (index < PT_FPR0) { ret = put_reg(child, index, data); } else { if (child->thread.regs->msr & MSR_FP) giveup_fpu(child); ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; ret = 0; } break; } case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ ret = -EIO; if ((unsigned long) data > _NSIG) break; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); child->exit_code = data; /* make sure the single step bit is not set. */ clear_single_step(child); wake_up_process(child); ret = 0; break; } /* * make the child exit. Best I can do is send it a sigkill. * perhaps it should be put in the status that it wants to * exit. */ case PTRACE_KILL: { ret = 0; if (child->state == TASK_ZOMBIE) /* already dead */ break; child->exit_code = SIGKILL; /* make sure the single step bit is not set. */ clear_single_step(child); wake_up_process(child); break; } case PTRACE_SINGLESTEP: { /* set the trap flag. */ ret = -EIO; if ((unsigned long) data > _NSIG) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); set_single_step(child); child->exit_code = data; /* give it a chance to run. */ wake_up_process(child); ret = 0; break; } case PTRACE_DETACH: ret = ptrace_detach(child, data); break; case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. 
*/ int i; unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; for (i = 0; i < 32; i++) { ret = put_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; for (i = 0; i < 32; i++) { ret = get_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; if (child->thread.regs->msr & MSR_FP) giveup_fpu(child); for (i = 0; i < 32; i++) { ret = put_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; if (child->thread.regs->msr & MSR_FP) giveup_fpu(child); for (i = 0; i < 32; i++) { ret = get_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } default: ret = ptrace_request(child, request, addr, data); break; } out_tsk: put_task_struct(child); out: unlock_kernel(); return ret; }
/* * Set up a signal frame. */ static void setup_frame(struct pt_regs *regs, struct sigregs *frame, unsigned long newsp) { /* Handler is *really* a pointer to the function descriptor for * the signal routine. The first entry in the function * descriptor is the entry address of signal and the second * entry is the TOC value we need to use. */ struct funct_descr_entry { unsigned long entry; unsigned long toc; }; struct funct_descr_entry * funct_desc_ptr; unsigned long temp_ptr; struct sigcontext_struct *sc = (struct sigcontext_struct *) newsp; if (verify_area(VERIFY_WRITE, frame, sizeof(*frame))) goto badframe; if (regs->msr & MSR_FP) giveup_fpu(current); if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE) || __copy_to_user(&frame->fp_regs, current->thread.fpr, ELF_NFPREG * sizeof(double)) || __put_user(0x38000000UL + __NR_sigreturn, &frame->tramp[0]) /* li r0, __NR_sigreturn */ || __put_user(0x44000002UL, &frame->tramp[1])) /* sc */ goto badframe; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); newsp -= __SIGNAL_FRAMESIZE; if ( get_user(temp_ptr, &sc->handler)) goto badframe; funct_desc_ptr = ( struct funct_descr_entry *) temp_ptr; if (put_user(regs->gpr[1], (unsigned long *)newsp) || get_user(regs->nip, & funct_desc_ptr ->entry) || get_user(regs->gpr[2],& funct_desc_ptr->toc) || get_user(regs->gpr[3], &sc->signal)) goto badframe; regs->gpr[1] = newsp; regs->gpr[4] = (unsigned long) sc; regs->link = (unsigned long) frame->tramp; PPCDBG(PPCDBG_SIGNAL, "setup_frame - returning - regs->gpr[1]=%lx, regs->gpr[4]=%lx, regs->link=%lx \n", regs->gpr[1], regs->gpr[4], regs->link); return; badframe: PPCDBG(PPCDBG_SIGNAL, "setup_frame - badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); PPCDBG_ENTER_DEBUGGER(); #if DEBUG_SIG printk("badframe in setup_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif do_exit(SIGSEGV); }
static void setup_rt_frame(struct pt_regs *regs, struct sigregs *frame, signed long newsp) { struct rt_sigframe *rt_sf = (struct rt_sigframe *) newsp; /* Handler is *really* a pointer to the function descriptor for * the signal routine. The first entry in the function * descriptor is the entry address of signal and the second * entry is the TOC value we need to use. */ struct funct_descr_entry { unsigned long entry; unsigned long toc; }; struct funct_descr_entry * funct_desc_ptr; unsigned long temp_ptr; /* Set up preamble frame */ if (verify_area(VERIFY_WRITE, frame, sizeof(*frame))) goto badframe; if (regs->msr & MSR_FP) giveup_fpu(current); if (__copy_to_user(&frame->gp_regs, regs, GP_REGS_SIZE) || __copy_to_user(&frame->fp_regs, current->thread.fpr, ELF_NFPREG * sizeof(double)) || __put_user(0x38000000UL + __NR_rt_sigreturn, &frame->tramp[0]) /* li r0, __NR_rt_sigreturn */ || __put_user(0x44000002UL, &frame->tramp[1])) /* sc */ goto badframe; flush_icache_range((unsigned long) &frame->tramp[0], (unsigned long) &frame->tramp[2]); /* Retrieve rt_sigframe from stack and set up registers for signal handler */ newsp -= __SIGNAL_FRAMESIZE; if ( get_user(temp_ptr, &rt_sf->uc.uc_mcontext.handler)) { goto badframe; } funct_desc_ptr = ( struct funct_descr_entry *) temp_ptr; if (put_user(regs->gpr[1], (unsigned long *)newsp) || get_user(regs->nip, &funct_desc_ptr->entry) || get_user(regs->gpr[2], &funct_desc_ptr->toc) || get_user(regs->gpr[3], &rt_sf->uc.uc_mcontext.signal) || get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo) || get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc)) goto badframe; regs->gpr[1] = newsp; regs->gpr[6] = (unsigned long) rt_sf; regs->link = (unsigned long) frame->tramp; return; badframe: #if DEBUG_SIG printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", regs, frame, newsp); #endif do_exit(SIGSEGV); }
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7, unsigned long r8, struct pt_regs *regs) { struct rt_sigframe *rt_sf; struct sigcontext_struct sigctx; struct sigregs *sr; int ret; elf_gregset_t saved_regs; /* an array of ELF_NGREG unsigned longs */ sigset_t set; stack_t st; unsigned long prevsp; rt_sf = (struct rt_sigframe *)(regs->gpr[1] + __SIGNAL_FRAMESIZE); if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx)) || copy_from_user(&set, &rt_sf->uc.uc_sigmask, sizeof(set)) || copy_from_user(&st, &rt_sf->uc.uc_stack, sizeof(st))) goto badframe; sigdelsetmask(&set, ~_BLOCKABLE); spin_lock_irq(¤t->sigmask_lock); current->blocked = set; recalc_sigpending(current); spin_unlock_irq(¤t->sigmask_lock); rt_sf++; /* Look at next rt_sigframe */ if (rt_sf == (struct rt_sigframe *)(sigctx.regs)) { /* Last stacked signal - restore registers - * sigctx is initialized to point to the * preamble frame (where registers are stored) * see handle_signal() */ sr = (struct sigregs *) sigctx.regs; if (regs->msr & MSR_FP ) giveup_fpu(current); if (copy_from_user(saved_regs, &sr->gp_regs, sizeof(sr->gp_regs))) goto badframe; saved_regs[PT_MSR] = (regs->msr & ~MSR_USERCHANGE) | (saved_regs[PT_MSR] & MSR_USERCHANGE); saved_regs[PT_SOFTE] = regs->softe; memcpy(regs, saved_regs, GP_REGS_SIZE); if (copy_from_user(current->thread.fpr, &sr->fp_regs, sizeof(sr->fp_regs))) goto badframe; /* This function sets back the stack flags into the current task structure. 
*/ sys_sigaltstack(&st, NULL); ret = regs->result; } else { /* More signals to go */ /* Set up registers for next signal handler */ regs->gpr[1] = (unsigned long)rt_sf - __SIGNAL_FRAMESIZE; if (copy_from_user(&sigctx, &rt_sf->uc.uc_mcontext, sizeof(sigctx))) goto badframe; sr = (struct sigregs *) sigctx.regs; regs->gpr[3] = ret = sigctx.signal; /* Get the siginfo */ get_user(regs->gpr[4], (unsigned long *)&rt_sf->pinfo); /* Get the ucontext */ get_user(regs->gpr[5], (unsigned long *)&rt_sf->puc); regs->gpr[6] = (unsigned long) rt_sf; regs->link = (unsigned long) &sr->tramp; regs->nip = sigctx.handler; if (get_user(prevsp, &sr->gp_regs[PT_R1]) || put_user(prevsp, (unsigned long *) regs->gpr[1])) goto badframe; } return ret; badframe: do_exit(SIGSEGV); }