static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct fpsimd_state fpsimd;
	__u32 magic, size;
	int err = 0;

	/* check the magic/size information */
	__get_user_error(magic, &ctx->head.magic, err);
	__get_user_error(size, &ctx->head.size, err);
	if (err)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* copy the FP and status/control registers */
	err = __copy_from_user(fpsimd.vregs, ctx->vregs,
			       sizeof(fpsimd.vregs));
	__get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
	__get_user_error(fpsimd.fpcr, &ctx->fpcr, err);

	/* load the hardware registers from the fpsimd_state structure */
	if (!err)
		fpsimd_update_current_state(&fpsimd);

	return err ? -EFAULT : 0;
}
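/*
 * All of the routines collected here lean on the same idiom:
 * __get_user_error() leaves its error argument untouched on success and
 * writes it only on a fault, so a caller can chain many accesses and test
 * the accumulated value once at the end. Below is a minimal user-space
 * sketch of that "sticky error" pattern; fetch() and get_user_error() are
 * illustrative stand-ins, not the kernel's real macros.
 */
#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Hypothetical stand-in for a user-memory access that can fail;
 * a NULL source plays the role of a faulting address. */
static int fetch(void *dst, const void *src, size_t n)
{
	if (!src)
		return -EFAULT;
	memcpy(dst, src, n);
	return 0;
}

/* Like the kernel's __get_user_error(): err is written only on failure. */
#define get_user_error(x, ptr, err)			\
	do {						\
		if (fetch(&(x), (ptr), sizeof(x)))	\
			(err) = -EFAULT;		\
	} while (0)

int main(void)
{
	unsigned int magic = 0, size = 0, src = 0x46508001;
	int err = 0;

	get_user_error(magic, &src, err);		/* ok, err untouched */
	get_user_error(size, (unsigned int *)0, err);	/* "faults": err = -EFAULT */

	printf("magic=%#x size=%u err=%d\n", magic, size, err);
	return err ? 1 : 0;
}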
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
			     struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;
	int err = 0;

	vfp_flush_hwstate(thread);

	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
				sizeof(hwstate->fpregs));
	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);
	__get_user_error(fpexc, &ufp_exc->fpexc, err);
	fpexc |= FPEXC_EN;
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	return err ? -EFAULT : 0;
}
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int i, err;
	struct aux_context __user *aux =
		(struct aux_context __user *)sf->uc.uc_mcontext.__reserved;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	for (i = 0; i < 31; i++)
		__get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
				 err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
	__get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
	__get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	regs->syscallno = ~0UL;

	err |= !valid_user_regs(&regs->user_regs);

	if (err == 0)
		err |= restore_fpsimd_context(&aux->fpsimd);

	return err;
}
static int restore_sigcontext(struct sigcontext *sc)
{
	int err = 0;
	struct exregs_regs regs;
	struct restore_sigframe __user *restore;

	printk_dbg("%s called\n", __func__);

	/* User registers are saved/restored in user.S (__wombat_user_xxx) */
	__get_user_error(regs.sp, &sc->arm_sp, err);
	__get_user_error(regs.ip, &sc->arm_pc, err);
	__get_user_error(regs.flags, &sc->arm_cpsr, err);

	restore = (void *)(regs.sp - sizeof(struct restore_sigframe));
	__put_user_error(regs.ip, &restore->ret_ip, err);

	if (!err) {
		set_need_restart(current_thread_info(),
				 (unsigned long)TASK_SIG_BASE +
				 (((unsigned long)&__wombat_user_sigrestore) &
				  ~PAGE_MASK),
				 (unsigned long)sc, regs.flags);
		L4_Stop_Thread(current_thread_info()->user_tid);
		set_user_ipc_cancelled(current_thread_info());
	}

	return err;
}
static int copy_locked(void __user *uptr, void *kptr, size_t size, int write,
		       void (*copyfn)(void *, void __user *))
{
	unsigned char v, __user *userptr = uptr;
	int err = 0;

	do {
		struct mm_struct *mm;

		if (write) {
			__put_user_error(0, userptr, err);
			__put_user_error(0, userptr + size - 1, err);
		} else {
			__get_user_error(v, userptr, err);
			__get_user_error(v, userptr + size - 1, err);
		}

		if (err)
			break;

		mm = current->mm;
		spin_lock(&mm->page_table_lock);
		if (page_present(mm, userptr, write) &&
		    page_present(mm, userptr + size - 1, write)) {
			copyfn(kptr, uptr);
		} else
			err = 1;
		spin_unlock(&mm->page_table_lock);
	} while (err);

	return err;
}
static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	unsigned long magic;
	unsigned long size;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);
	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}
static inline int restore_sigcontext_fpu(struct pt_regs *regs,
					 struct sigcontext __user *sc)
{
	struct task_struct *tsk = current;
	unsigned long used_math_flag;
	int ret = 0;

	clear_used_math();
	__get_user_error(used_math_flag, &sc->used_math_flag, ret);

	if (!used_math_flag)
		return 0;
	set_used_math();

#if IS_ENABLED(CONFIG_LAZY_FPU)
	preempt_disable();
	if (current == last_task_used_math) {
		last_task_used_math = NULL;
		disable_ptreg_fpu(regs);
	}
	preempt_enable();
#else
	clear_fpu(regs);
#endif

	return __copy_from_user(&tsk->thread.fpu, &sc->fpu,
				sizeof(struct fpu_struct));
}
/* Sanitise and restore the current VFP state from the provided structures. */
int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
			     struct user_vfp_exc __user *ufp_exc)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
	unsigned long fpexc;
	int err = 0;

	/*
	 * If VFP has been used, then disable it to avoid corrupting
	 * the new thread state.
	 */
	if (hwstate->fpexc & FPEXC_EN)
		vfp_flush_hwstate(thread);

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_from_user(&hwstate->fpregs, &ufp->fpregs,
				sizeof(hwstate->fpregs));

	/*
	 * Copy the status and control register.
	 */
	__get_user_error(hwstate->fpscr, &ufp->fpscr, err);

	/*
	 * Sanitise and restore the exception registers.
	 */
	__get_user_error(fpexc, &ufp_exc->fpexc, err);

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	hwstate->fpexc = fpexc;

	__get_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
	__get_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);

	return err ? -EFAULT : 0;
}
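/*
 * The two bit operations above are the entire sanitisation step: whatever
 * fpexc value a (possibly hostile) signal frame carried, the restored copy
 * always has the enable bit set and the exception/FPINST2-valid bits
 * cleared. A self-contained sketch follows; the FPEXC_* bit positions are
 * assumed from the ARM VFP register layout, and the kernel's authoritative
 * definitions live in arch/arm/include/asm/vfp.h.
 */
#include <assert.h>
#include <stdio.h>

#define FPEXC_EX	(1u << 31)	/* exception pending (assumed) */
#define FPEXC_EN	(1u << 30)	/* VFP enabled (assumed) */
#define FPEXC_FP2V	(1u << 28)	/* FPINST2 valid (assumed) */

int main(void)
{
	/* A hostile signal frame could carry any fpexc value at all. */
	unsigned int fpexc = FPEXC_EX | FPEXC_FP2V;

	fpexc |= FPEXC_EN;			/* force VFP enabled */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);	/* drop exception state */

	assert(fpexc == FPEXC_EN);
	printf("sanitised fpexc = %#x\n", fpexc);
	return 0;
}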
static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
	unsigned long magic0, magic1;
	int err = 0;

	/* the iWMMXt context is 64 bit aligned */
	WARN_ON((unsigned long)frame & 7);

	/*
	 * Validate iWMMXt context signature.
	 * Also, iwmmxt_task_restore() doesn't check user permissions.
	 * Let's do a dummy read on the upper boundary to ensure
	 * access to user mem is OK all the way up.
	 */
	__get_user_error(magic0, &frame->magic0, err);
	__get_user_error(magic1, &frame->magic1, err);
	if (!err && magic0 == IWMMXT_MAGIC0 && magic1 == IWMMXT_MAGIC1)
		err = copy_locked(&frame->storage, current_thread_info(),
				  sizeof(frame->storage), 0,
				  iwmmxt_task_restore);
	return err;
}
asmlinkage long sys_oabi_semtimedop(int semid,
				    struct oabi_sembuf __user *tsops,
				    unsigned nsops,
				    const struct timespec __user *timeout)
{
	struct sembuf *sops;
	struct timespec local_timeout;
	long err;
	int i;

	if (nsops < 1 || nsops > SEMOPM)
		return -EINVAL;
	if (!access_ok(VERIFY_READ, tsops, sizeof(*tsops) * nsops))
		return -EFAULT;
	sops = kmalloc(sizeof(*sops) * nsops, GFP_KERNEL);
	if (!sops)
		return -ENOMEM;
	err = 0;
	for (i = 0; i < nsops; i++) {
		__get_user_error(sops[i].sem_num, &tsops->sem_num, err);
		__get_user_error(sops[i].sem_op, &tsops->sem_op, err);
		__get_user_error(sops[i].sem_flg, &tsops->sem_flg, err);
		tsops++;
	}
	if (timeout) {
		/* copy this as well before changing domain protection */
		err |= copy_from_user(&local_timeout, timeout,
				      sizeof(*timeout));
		timeout = &local_timeout;
	}
	if (err) {
		err = -EFAULT;
	} else {
		mm_segment_t fs = get_fs();

		set_fs(KERNEL_DS);
		err = sys_semtimedop(semid, sops, nsops, timeout);
		set_fs(fs);
	}
	kfree(sops);
	return err;
}
static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
	struct thread_info *thread = current_thread_info();
	struct vfp_hard_struct *h = &thread->vfpstate.hard;
	unsigned long magic;
	unsigned long size;
	unsigned long fpexc;
	int err = 0;

	__get_user_error(magic, &frame->magic, err);
	__get_user_error(size, &frame->size, err);
	if (err)
		return -EFAULT;
	if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
		return -EINVAL;

	/*
	 * Copy the floating point registers. There can be unused
	 * registers; see asm/hwcap.h for details.
	 */
	err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs,
				sizeof(h->fpregs));

	/*
	 * Copy the status and control register.
	 */
	__get_user_error(h->fpscr, &frame->ufp.fpscr, err);

	/*
	 * Sanitise and restore the exception registers.
	 */
	__get_user_error(fpexc, &frame->ufp_exc.fpexc, err);

	/* Ensure the VFP is enabled. */
	fpexc |= FPEXC_EN;

	/* Ensure FPINST2 is invalid and the exception flag is cleared. */
	fpexc &= ~(FPEXC_EX | FPEXC_FP2V);
	h->fpexc = fpexc;

	__get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
	__get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);

	if (!err)
		vfp_flush_hwstate(thread);

	return err ? -EFAULT : 0;
}
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux->vfp);
#endif

	return err;
}
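/*
 * For context, a condensed sketch of how a restore_sigframe()-style helper
 * is typically driven, modelled on ARM's sys_sigreturn() of the same era;
 * helper names and the location of restart_block vary across kernel
 * versions, so treat the details as assumptions. The handler rejects a
 * misaligned stack pointer before ever dereferencing the frame.
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR. */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	/*
	 * The frame was stacked on a 64-bit boundary, so 'sp' must still
	 * be 8-byte aligned here; if not, userspace has tampered with it.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}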
static int restore_sigcontext(struct pt_regs *regs,
			      struct sigcontext __user *sc)
{
	int err = 0;

	__get_user_error(regs->ARM_r0, &sc->arm_r0, err);
	__get_user_error(regs->ARM_r1, &sc->arm_r1, err);
	__get_user_error(regs->ARM_r2, &sc->arm_r2, err);
	__get_user_error(regs->ARM_r3, &sc->arm_r3, err);
	__get_user_error(regs->ARM_r4, &sc->arm_r4, err);
	__get_user_error(regs->ARM_r5, &sc->arm_r5, err);
	__get_user_error(regs->ARM_r6, &sc->arm_r6, err);
	__get_user_error(regs->ARM_r7, &sc->arm_r7, err);
	__get_user_error(regs->ARM_r8, &sc->arm_r8, err);
	__get_user_error(regs->ARM_r9, &sc->arm_r9, err);
	__get_user_error(regs->ARM_r10, &sc->arm_r10, err);
	__get_user_error(regs->ARM_fp, &sc->arm_fp, err);
	__get_user_error(regs->ARM_ip, &sc->arm_ip, err);
	__get_user_error(regs->ARM_sp, &sc->arm_sp, err);
	__get_user_error(regs->ARM_lr, &sc->arm_lr, err);
	__get_user_error(regs->ARM_pc, &sc->arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sc->arm_cpsr, err);

	err |= !valid_user_regs(regs);

	return err;
}
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct aux_sigframe __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0) {
		sigdelsetmask(&set, ~_BLOCKABLE);
		spin_lock_irq(&current->sighand->siglock);
		current->blocked = set;
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	__get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
	__get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
	__get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
	__get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
	__get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
	__get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
	__get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
	__get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
	__get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
	__get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
	__get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
	__get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
	__get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
	__get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
	__get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
	__get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
	__get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

	err |= !valid_user_regs(regs);

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
		err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
//	if (err == 0)
//		err |= vfp_restore_state(&sf->aux.vfp);
#endif

	return err;
}
static int restore_sigframe(struct pt_regs *regs,
			    struct rt_sigframe __user *sf)
{
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	__get_user_error(regs->uregs[0], &sf->uc.uc_mcontext.nds32_r0, err);
	__get_user_error(regs->uregs[1], &sf->uc.uc_mcontext.nds32_r1, err);
	__get_user_error(regs->uregs[2], &sf->uc.uc_mcontext.nds32_r2, err);
	__get_user_error(regs->uregs[3], &sf->uc.uc_mcontext.nds32_r3, err);
	__get_user_error(regs->uregs[4], &sf->uc.uc_mcontext.nds32_r4, err);
	__get_user_error(regs->uregs[5], &sf->uc.uc_mcontext.nds32_r5, err);
	__get_user_error(regs->uregs[6], &sf->uc.uc_mcontext.nds32_r6, err);
	__get_user_error(regs->uregs[7], &sf->uc.uc_mcontext.nds32_r7, err);
	__get_user_error(regs->uregs[8], &sf->uc.uc_mcontext.nds32_r8, err);
	__get_user_error(regs->uregs[9], &sf->uc.uc_mcontext.nds32_r9, err);
	__get_user_error(regs->uregs[10], &sf->uc.uc_mcontext.nds32_r10, err);
	__get_user_error(regs->uregs[11], &sf->uc.uc_mcontext.nds32_r11, err);
	__get_user_error(regs->uregs[12], &sf->uc.uc_mcontext.nds32_r12, err);
	__get_user_error(regs->uregs[13], &sf->uc.uc_mcontext.nds32_r13, err);
	__get_user_error(regs->uregs[14], &sf->uc.uc_mcontext.nds32_r14, err);
	__get_user_error(regs->uregs[15], &sf->uc.uc_mcontext.nds32_r15, err);
	__get_user_error(regs->uregs[16], &sf->uc.uc_mcontext.nds32_r16, err);
	__get_user_error(regs->uregs[17], &sf->uc.uc_mcontext.nds32_r17, err);
	__get_user_error(regs->uregs[18], &sf->uc.uc_mcontext.nds32_r18, err);
	__get_user_error(regs->uregs[19], &sf->uc.uc_mcontext.nds32_r19, err);
	__get_user_error(regs->uregs[20], &sf->uc.uc_mcontext.nds32_r20, err);
	__get_user_error(regs->uregs[21], &sf->uc.uc_mcontext.nds32_r21, err);
	__get_user_error(regs->uregs[22], &sf->uc.uc_mcontext.nds32_r22, err);
	__get_user_error(regs->uregs[23], &sf->uc.uc_mcontext.nds32_r23, err);
	__get_user_error(regs->uregs[24], &sf->uc.uc_mcontext.nds32_r24, err);
	__get_user_error(regs->uregs[25], &sf->uc.uc_mcontext.nds32_r25, err);

	__get_user_error(regs->fp, &sf->uc.uc_mcontext.nds32_fp, err);
	__get_user_error(regs->gp, &sf->uc.uc_mcontext.nds32_gp, err);
	__get_user_error(regs->lp, &sf->uc.uc_mcontext.nds32_lp, err);
	__get_user_error(regs->sp, &sf->uc.uc_mcontext.nds32_sp, err);
	__get_user_error(regs->ipc, &sf->uc.uc_mcontext.nds32_ipc, err);
#if defined(CONFIG_HWZOL)
	__get_user_error(regs->lc, &sf->uc.uc_mcontext.zol.nds32_lc, err);
	__get_user_error(regs->le, &sf->uc.uc_mcontext.zol.nds32_le, err);
	__get_user_error(regs->lb, &sf->uc.uc_mcontext.zol.nds32_lb, err);
#endif
#if IS_ENABLED(CONFIG_FPU)
	err |= restore_sigcontext_fpu(regs, &sf->uc.uc_mcontext);
#endif

	/*
	 * Avoid sys_rt_sigreturn() restarting.
	 */
	forget_syscall(regs);

	return err;
}