/*
 * Install a complete general-purpose register snapshot supplied by the
 * tracer, rejecting any state a user task is not permitted to hold.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct pt_regs newregs;
	int ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				     &newregs, 0, sizeof(newregs));

	if (ret)
		return ret;

	/* refuse register states a user context may not run with */
	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Each 64-bit buffer slot lands in the
 * low 64 bits of the corresponding FP general register slot; only
 * general registers are handled here.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int reg;
	u64 val;
	int err;

	/* one buffer slot must be exactly one ELF FP register wide */
	BUILD_BUG_ON(sizeof(val) != sizeof(elf_fpreg_t));

	for (reg = 0; reg < NUM_FPU_REGS; reg++) {
		if (!*count)
			break;
		err = user_regset_copyin(pos, count, kbuf, ubuf, &val,
					 reg * sizeof(elf_fpreg_t),
					 (reg + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[reg], 0, val);
	}

	return 0;
}
/*
 * Replace the task's saved FPU state with the buffer supplied by the
 * tracer and mark the task as an FPU user.
 */
static int fpuregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct fpu_state_struct newstate = target->thread.fpu_state;
	int err;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newstate, 0, sizeof(newstate));
	if (err < 0)
		return err;

	/* drop the task's current FPU context before installing the new one */
	fpu_kill_state(target);
	target->thread.fpu_state = newstate;
	set_using_fpu(target);

	/* consume and discard anything past the FPU state proper */
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 sizeof(newstate), -1);
}
/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for general registers, and then copy
 * the FCSR register separately.  Ignore the incoming FIR register
 * contents though, as the register is read-only.
 *
 * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
 * which is supposed to have been guaranteed by the kernel before
 * calling us, e.g. in `ptrace_regset'.  We enforce that requirement,
 * so that we can safely avoid preinitializing temporaries for
 * partial register writes.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	/* FCSR follows the NUM_FPU_REGS general registers in the layout. */
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	/* FIR immediately follows FCSR. */
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	/* Make sure the FP context exists before we write into it. */
	init_fp_ctx(target);

	/*
	 * FP register slots as wide as an ELF FP register: copy 1:1;
	 * otherwise (MSA-sized slots) write the low 64 bits of each.
	 */
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	/* Anything left after the general registers starts with FCSR. */
	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	/* FIR is read-only: consume but discard any supplied value. */
	if (count > 0)
		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						fir_pos,
						fir_pos + sizeof(u32));

	return err;
}
/*
 * update the contents of the Blackfin userspace general registers
 */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int err;

	/*
	 * SYSCFG lives at the tail of pt_regs; stop the copy just
	 * before it so the tracer cannot modify it.
	 */
	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs, 0, PT_SYSCFG);
	if (err < 0)
		return err;

	/* mirror the updated user stack pointer into the thread struct */
	target->thread.usp = regs->usp;

	/* swallow SYSCFG and anything beyond it */
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 PT_SYSCFG, -1);
}
/*
 * Write the supplied buffer into the tracee's saved general registers,
 * excluding the kernel-owned SYSCFG slot at the end of pt_regs.
 */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int err;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
				 0, PT_SYSCFG);
	if (err < 0)
		return err;

	/* keep the cached user stack pointer in sync with pt_regs */
	target->thread.usp = regs->usp;

	/* ignore SYSCFG and everything after it */
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 PT_SYSCFG, -1);
}
/*
 * Install the tracer-supplied general purpose registers, keeping the
 * kernel-owned status fields from the task's current state.
 *
 * Fixes a character-encoding corruption: "&regs" had been mangled
 * into the "(R)" mojibake sequence, which does not compile.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs regs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
	if (ret)
		return ret;

	/* sr (and dcsr) are not user-settable: keep the current values */
	regs.sr = task_pt_regs(target)->sr;
#ifdef CONFIG_CPU_HAS_HILO
	regs.dcsr = task_pt_regs(target)->dcsr;
#endif
	/* propagate the new TLS value to the thread info */
	task_thread_info(target)->tp_value = regs.tls;

	*task_pt_regs(target) = regs;

	return 0;
}
/*
 * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
 *
 * We optimize for the case where `count % sizeof(int) == 0', which
 * is supposed to have been guaranteed by the kernel before calling
 * us, e.g. in `ptrace_regset'.  We enforce that requirement, so
 * that we can safely avoid preinitializing temporaries for partial
 * mode writes.
 */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	/*
	 * Apply the mode only once the whole value has been consumed.
	 * The bounds check above guarantees the copy drains `count' to
	 * zero, so the previous `count > 0' test could never be true
	 * and the new mode was silently discarded; upstream tests for
	 * `!count' here for the same reason.
	 */
	if (!count)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}
/*
 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	/* regset slice being written: registers [start, start + num_regs) */
	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	/*
	 * Iterate over the slice actually written.  The previous upper
	 * bound of `num_regs' dropped registers whenever the write
	 * started at a non-zero offset (start > 0); `start + num_regs'
	 * is identical for whole-regset writes and correct for partial
	 * ones.
	 */
	for (i = start; i < start + num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			/* DSPControl lives in the final slot */
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}
/*
 * Update the tracee's registers from the supplied buffer.  The current
 * register values are first flattened into a user_regs_struct image,
 * the tracer's bytes are overlaid, and the result is written back
 * register by register.
 *
 * Fixes a character-encoding corruption: "&regs" had been mangled
 * into the "(R)" mojibake sequence (three occurrences), which does
 * not compile.
 */
static int regs_set(struct task_struct *target,
		    const struct user_regset *regset,
		    unsigned int pos, unsigned int count,
		    const void *kbuf, const void __user *ubuf)
{
	int r;
	int ret;
	struct user_regs_struct regs;
	long *reg;

	/* build user regs in buffer */
	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
		*reg++ = h8300_get_reg(target, r);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &regs, 0, sizeof(regs));
	if (ret)
		return ret;

	/* write back to pt_regs */
	for (reg = (long *)&regs, r = 0; r < ARRAY_SIZE(register_offset); r++)
		h8300_put_reg(target, r, *reg++);

	return 0;
}