/*
 * setup_sigcontext - copy saved register state to a user sigcontext
 * @regs: kernel-saved register state for the faulting/signaled thread
 * @sc:   user-space sigcontext to populate
 *
 * Copies the 32 general-purpose registers in one block (they are laid
 * out contiguously in both pt_regs and sigcontext), then the special
 * registers one at a time.  Returns 0 on success, non-zero if any
 * copy-out to user space faulted.
 */
static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
{
	unsigned long tmp;
	int err = 0;

	/* r0-r31: contiguous in both structures, copy as one block */
	err |= copy_to_user(&sc->sc_regs.r0, &regs->r00,
			    32*sizeof(unsigned long));

	err |= __put_user(regs->sa0, &sc->sc_regs.sa0);
	err |= __put_user(regs->lc0, &sc->sc_regs.lc0);
	err |= __put_user(regs->sa1, &sc->sc_regs.sa1);
	err |= __put_user(regs->lc1, &sc->sc_regs.lc1);
	err |= __put_user(regs->m0, &sc->sc_regs.m0);
	err |= __put_user(regs->m1, &sc->sc_regs.m1);
	err |= __put_user(regs->usr, &sc->sc_regs.usr);
	err |= __put_user(regs->preds, &sc->sc_regs.p3_0);
	err |= __put_user(regs->gp, &sc->sc_regs.gp);
	err |= __put_user(regs->ugp, &sc->sc_regs.ugp);

	/* pc/cause/badva are accessed through macros; stage via a local */
	tmp = pt_elr(regs);
	err |= __put_user(tmp, &sc->sc_regs.pc);
	tmp = pt_cause(regs);
	err |= __put_user(tmp, &sc->sc_regs.cause);
	tmp = pt_badva(regs);
	err |= __put_user(tmp, &sc->sc_regs.badva);

	return err;
}
/*
 * genregs_get - regset "get" handler for the general registers
 * @target: task whose registers are being read
 * @regset: regset description (unused beyond the standard contract)
 * @pos/@count/@kbuf/@ubuf: standard user_regset_copyout cursor state
 *
 * Copies registers out in exactly the order user space expects
 * (struct user_regs_struct).  The 32 GPRs are contiguous and go out in
 * one call; everything after them is in a different order in the
 * kernel's pt_regs, so each is copied individually at its own offset.
 * Returns 0 on success or a negative errno.
 */
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	int ret;
	/*
	 * ONEXT copies sizeof(unsigned long) bytes from its first
	 * argument, so the staging variable must be an unsigned long.
	 */
	unsigned long dummy;
	struct pt_regs *regs = task_pt_regs(target);

	if (!regs)
		return -EIO;

	/* The general idea here is that the copyout must happen in
	 * exactly the same order in which the userspace expects these
	 * regs. Now, the sequence in userspace does not match the
	 * sequence in the kernel, so everything past the 32 gprs
	 * happens one at a time.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &regs->r00, 0, 32*sizeof(unsigned long));

	/* Copy one register at the offset user space expects for it */
#define ONEXT(KPT_REG, USR_REG) \
	if (!ret) \
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, \
			KPT_REG, offsetof(struct user_regs_struct, USR_REG), \
			offsetof(struct user_regs_struct, USR_REG) + \
				sizeof(unsigned long));

	/* Must be exactly same sequence as struct user_regs_struct */
	ONEXT(&regs->sa0, sa0);
	ONEXT(&regs->lc0, lc0);
	ONEXT(&regs->sa1, sa1);
	ONEXT(&regs->lc1, lc1);
	ONEXT(&regs->m0, m0);
	ONEXT(&regs->m1, m1);
	ONEXT(&regs->usr, usr);
	ONEXT(&regs->preds, p3_0);
	ONEXT(&regs->gp, gp);
	ONEXT(&regs->ugp, ugp);
	ONEXT(&pt_elr(regs), pc);
	/* pt_cause() is an rvalue expression; stage it so we can take &it */
	dummy = pt_cause(regs);
	ONEXT(&dummy, cause);
	ONEXT(&pt_badva(regs), badva);

	/* Pad the rest with zeros, if needed */
	if (!ret)
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					offsetof(struct user_regs_struct, pad1), -1);
	return ret;
}
/* Instruction-fetch protection fault: forward the faulting address
 * to the common page-fault handler as an ifetch fault.
 */
void execute_protection_fault(struct pt_regs *regs)
{
	do_page_fault(pt_badva(regs), FLT_IFETCH, regs);
}
/* Write protection fault: forward the faulting address to the
 * common page-fault handler as a store fault.
 */
void write_protection_fault(struct pt_regs *regs)
{
	do_page_fault(pt_badva(regs), FLT_STORE, regs);
}
/* Read protection fault: forward the faulting address to the
 * common page-fault handler as a load fault.
 */
void read_protection_fault(struct pt_regs *regs)
{
	do_page_fault(pt_badva(regs), FLT_LOAD, regs);
}