/*
 * Populate the FPU/vector areas of the restorer signal frame from the
 * checkpointed core image.
 *
 * Each optional register class (Altivec, VSX, TM) must be consistent with
 * the MSR saved in the image: if the MSR says the class was in use but the
 * corresponding state record is missing, the image is corrupted.
 *
 * Note: the original code tested "!ret && <state>" before each class, so
 * once ret was already -1 it fell through into the "else if (msr & ...)"
 * branch and printed a misleading "data mismatch" error even when the
 * state WAS present in the image.  Early returns avoid that.
 *
 * Returns 0 on success, -1 on a corrupted image or helper failure.
 */
int restore_fpu(struct rt_sigframe *sigframe, CoreEntry *core)
{
	int ret;

	if (CORE_THREAD_ARCH_INFO(core)->fpstate)
		put_fpu_regs(&sigframe->uc.uc_mcontext,
			     CORE_THREAD_ARCH_INFO(core)->fpstate);

	if (CORE_THREAD_ARCH_INFO(core)->vrstate) {
		ret = put_altivec_regs(&sigframe->uc.uc_mcontext,
				       CORE_THREAD_ARCH_INFO(core)->vrstate);
		if (ret)
			return ret;
	} else if (core->ti_ppc64->gpregs->msr & MSR_VEC) {
		/* MSR claims Altivec in use but no state was dumped. */
		pr_err("Register's data mismatch, corrupted image ?\n");
		return -1;
	}

	if (CORE_THREAD_ARCH_INFO(core)->vsxstate) {
		ret = put_vsx_regs(&sigframe->uc.uc_mcontext,
				   CORE_THREAD_ARCH_INFO(core)->vsxstate);
		if (ret)
			return ret;
	} else if (core->ti_ppc64->gpregs->msr & MSR_VSX) {
		/* MSR claims VSX in use but no state was dumped. */
		pr_err("VSX register's data mismatch, corrupted image ?\n");
		return -1;
	}

	if (CORE_THREAD_ARCH_INFO(core)->tmstate) {
		ret = put_tm_regs(sigframe,
				  CORE_THREAD_ARCH_INFO(core)->tmstate);
		if (ret)
			return ret;
	} else if (MSR_TM_ACTIVE(core->ti_ppc64->gpregs->msr)) {
		/* MSR claims an active transaction but no TM state dumped. */
		pr_err("TM register's data mismatch, corrupted image ?\n");
		return -1;
	}

	return 0;
}
/*
 * Return the stack pointer a signal frame should be built under, taking
 * Transactional Memory into account.  May treclaim (abort) an active
 * transaction as a side effect.
 */
unsigned long get_tm_stackpointer(struct pt_regs *regs)
{
	/* When in an active transaction that takes a signal, we need to be
	 * careful with the stack. It's possible that the stack has moved back
	 * up after the tbegin. The obvious case here is when the tbegin is
	 * called inside a function that returns before a tend. In this case,
	 * the stack is part of the checkpointed transactional memory state.
	 * If we write over this non transactionally or in suspend, we are in
	 * trouble because if we get a tm abort, the program counter and stack
	 * pointer will be back at the tbegin but our in memory stack won't be
	 * valid anymore.
	 *
	 * To avoid this, when taking a signal in an active transaction, we
	 * need to use the stack pointer from the checkpointed state, rather
	 * than the speculated state. This ensures that the signal context
	 * (written tm suspended) will be written below the stack required for
	 * the rollback. The transaction is aborted because of the treclaim,
	 * so any memory written between the tbegin and the signal will be
	 * rolled back anyway.
	 *
	 * For signals taken in non-TM or suspended mode, we use the
	 * normal/non-checkpointed stack pointer.
	 */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* Abort the transaction; the checkpointed register state is
		 * saved into current->thread.ckpt_regs by the reclaim. */
		tm_reclaim_current(TM_CAUSE_SIGNAL);
		if (MSR_TM_TRANSACTIONAL(regs->msr))
			return current->thread.ckpt_regs.gpr[1];
	}
#endif
	/* Non-TM (or suspended) case: use the live stack pointer (r1). */
	return regs->gpr[1];
}
/*
 * Handle rt_sigreturn(): tear down the signal frame that signal delivery
 * pushed onto the user stack, restoring the blocked-signal mask, the
 * register context (including a TM recheckpoint when the saved MSR shows
 * an active transaction), and the alternate signal stack.
 *
 * r3..r8 are the unused syscall argument registers; the frame address is
 * taken from the user stack pointer (regs->gpr[1]).
 */
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	/* The ucontext sits at the user stack pointer. */
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;

		/* The transactional context hangs off uc_link. */
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	}
	else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	/* Full register state is reloaded on the way back to userspace. */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
#if DEBUG_SIG
	printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
	       regs, uc, &uc->uc_mcontext);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
int sigreturn_prep_fpu_frame_plain(struct rt_sigframe *frame, struct rt_sigframe *rframe) { uint64_t msr = frame->uc.uc_mcontext.gp_regs[PT_MSR]; update_vregs(&frame->uc.uc_mcontext, &rframe->uc.uc_mcontext); /* Sanity check: If TM so uc_link should be set, otherwise not */ if (MSR_TM_ACTIVE(msr) ^ (!!(frame->uc.uc_link))) { BUG(); return -1; } /* Updating the transactional state address if any */ if (frame->uc.uc_link) { update_vregs(&frame->uc_transact.uc_mcontext, &rframe->uc_transact.uc_mcontext); frame->uc.uc_link = &rframe->uc_transact; } return 0; }
/*
 * Build an rt signal frame on the user stack and set up the register state
 * so that returning to userspace enters the signal handler.
 *
 * Returns 1 on success, 0 after forcing SIGSEGV on a bad frame.
 */
int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
		       sigset_t *set, struct pt_regs *regs)
{
	/* Handler is *really* a pointer to the function descriptor for
	 * the signal routine. The first entry in the function
	 * descriptor is the entry address of signal and the second
	 * entry is the TOC value we need to use.
	 */
	func_descr_t __user *funct_desc_ptr;
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;

	frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, info);
	if (err)
		goto badframe;

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    regs, signr, NULL,
					    (unsigned long)ka->sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, signr,
					NULL, (unsigned long)ka->sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	current->thread.fp_state.fpscr = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Remove TM bits from thread's MSR. The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want to return in transactional state:
	 */
	regs->msr &= ~MSR_TS_MASK;
#endif

	/* Set up to return from userspace.
	 */
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
		/* Preferred: return via the vDSO trampoline. */
		regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		/* Fallback: write a trampoline onto the frame itself. */
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}
	funct_desc_ptr = (func_descr_t __user *) ka->sa.sa_handler;

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler. */
	err |= get_user(regs->nip, &funct_desc_ptr->entry);
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	regs->gpr[1] = newsp;
	err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	regs->gpr[3] = signr;
	regs->result = 0;
	if (ka->sa.sa_flags & SA_SIGINFO) {
		/* SA_SIGINFO handlers get (signr, siginfo*, ucontext*). */
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 1;

badframe:
#if DEBUG_SIG
	printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
	       regs, frame, newsp);
#endif
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	force_sigsegv(signr, current);
	return 0;
}
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state. If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 *
 * (Fix: "&current" had been mangled into the mojibake "¤t" throughout
 * this function by an HTML-entity encoding error; restored so it compiles.)
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
				 struct sigcontext __user *tm_sc,
				 struct pt_regs *regs,
				 int signr, sigset_t *set, unsigned long handler)
{
	/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
	 * process never used altivec yet (MSR_VEC is zero in pt_regs of
	 * the context). This is very important because we must ensure we
	 * don't lose the VRSAVE content that may have been set prior to
	 * the process doing its first vector operation
	 * Userland shall check AT_HWCAP to know whether it can rely on the
	 * v_regs pointer or not.
	 */
#ifdef CONFIG_ALTIVEC
	/* 16-byte-align the vector save areas inside the reserve space. */
	elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *)
		(((unsigned long)sc->vmx_reserve + 15) & ~0xful);
	elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *)
		(((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful);
#endif
	unsigned long msr = regs->msr;
	long err = 0;

	BUG_ON(!MSR_TM_ACTIVE(regs->msr));

	flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
	err |= __put_user(v_regs, &sc->v_regs);
	err |= __put_user(tm_v_regs, &tm_sc->v_regs);

	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		/* Copy 33 vec registers (vr0..31 and vscr) to the stack */
		err |= __copy_to_user(v_regs, &current->thread.vr_state,
				      33 * sizeof(vector128));
		/* If VEC was enabled there are transactional VRs valid too,
		 * else they're a copy of the checkpointed VRs.
		 */
		if (msr & MSR_VEC)
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.transact_vr,
					      33 * sizeof(vector128));
		else
			err |= __copy_to_user(tm_v_regs,
					      &current->thread.vr_state,
					      33 * sizeof(vector128));

		/* set MSR_VEC in the MSR value in the frame to indicate
		 * that sc->v_reg contains valid data.
		 */
		msr |= MSR_VEC;
	}
	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
	if (msr & MSR_VEC)
		err |= __put_user(current->thread.transact_vrsave,
				  (u32 __user *)&tm_v_regs[33]);
	else
		err |= __put_user(current->thread.vrsave,
				  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
	err |= __put_user(0, &sc->v_regs);
	err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

	/* copy fpr regs and fpscr */
	err |= copy_fpr_to_user(&sc->fp_regs, current);
	if (msr & MSR_FP)
		err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
	else
		err |= copy_fpr_to_user(&tm_sc->fp_regs, current);

#ifdef CONFIG_VSX
	/*
	 * Copy VSX low doubleword to local buffer for formatting,
	 * then out to userspace. Update v_regs to point after the
	 * VMX data.
	 */
	if (current->thread.used_vsr) {
		__giveup_vsx(current);
		v_regs += ELF_NVRREG;
		tm_v_regs += ELF_NVRREG;

		err |= copy_vsx_to_user(v_regs, current);

		if (msr & MSR_VSX)
			err |= copy_transact_vsx_to_user(tm_v_regs, current);
		else
			err |= copy_vsx_to_user(tm_v_regs, current);

		/* set MSR_VSX in the MSR value in the frame to
		 * indicate that sc->vs_reg) contains valid data.
		 */
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */

	err |= __put_user(&sc->gp_regs, &sc->regs);
	err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
	WARN_ON(!FULL_REGS(regs));
	/* Transactional GPRs come from live regs; checkpointed ones from
	 * the state saved by treclaim. */
	err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
	err |= __copy_to_user(&sc->gp_regs,
			      &current->thread.ckpt_regs, GP_REGS_SIZE);
	err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
	err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
	err |= __put_user(signr, &sc->signal);
	err |= __put_user(handler, &sc->handler);
	if (set != NULL)
		err |= __put_user(set->sig[0], &sc->oldmask);

	return err;
}
/*
 * Build an rt signal frame on tsk's user stack and set up the register
 * state so that returning to userspace enters the signal handler.
 *
 * Returns 0 on success, 1 on a bad frame (caller handles the failure).
 */
int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	unsigned long newsp = 0;
	long err = 0;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 0);
	if (unlikely(frame == NULL))
		goto badframe;

	err |= __put_user(&frame->info, &frame->pinfo);
	err |= __put_user(&frame->uc, &frame->puc);
	err |= copy_siginfo_to_user(&frame->info, &ksig->info);
	if (err)
		goto badframe;

	/* Create the ucontext. */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* The ucontext_t passed to userland points to the second
		 * ucontext_t (for transactional state) with its uc_link ptr.
		 */
		err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
		err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
					    &frame->uc_transact.uc_mcontext,
					    tsk, ksig->sig, NULL,
					    (unsigned long)ksig->ka.sa.sa_handler);
	} else
#endif
	{
		err |= __put_user(0, &frame->uc.uc_link);
		err |= setup_sigcontext(&frame->uc.uc_mcontext, tsk, ksig->sig,
					NULL,
					(unsigned long)ksig->ka.sa.sa_handler,
					1);
	}
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto badframe;

	/* Make sure signal handler doesn't get spurious FP exceptions */
	tsk->thread.fp_state.fpscr = 0;

	/* Set up to return from userspace. */
	if (vdso64_rt_sigtramp && tsk->mm->context.vdso_base) {
		/* Preferred: return via the vDSO trampoline. */
		regs->link = tsk->mm->context.vdso_base + vdso64_rt_sigtramp;
	} else {
		/* Fallback: write a trampoline onto the frame itself. */
		err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
		if (err)
			goto badframe;
		regs->link = (unsigned long) &frame->tramp[0];
	}

	/* Allocate a dummy caller frame for the signal handler. */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

	/* Set up "regs" so we "return" to the signal handler.
	 */
	if (is_elf2_task()) {
		/* ELFv2 ABI: no function descriptors; r12 holds the entry. */
		regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
		regs->gpr[12] = regs->nip;
	} else {
		/* Handler is *really* a pointer to the function descriptor for
		 * the signal routine. The first entry in the function
		 * descriptor is the entry address of signal and the second
		 * entry is the TOC value we need to use.
		 */
		func_descr_t __user *funct_desc_ptr =
			(func_descr_t __user *) ksig->ka.sa.sa_handler;

		err |= get_user(regs->nip, &funct_desc_ptr->entry);
		err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
	}

	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->result = 0;
	if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
		/* SA_SIGINFO handlers get (signr, siginfo*, ucontext*). */
		err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
		err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
		regs->gpr[6] = (unsigned long) frame;
	} else {
		regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
	}
	if (err)
		goto badframe;

	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   tsk->comm, tsk->pid, "setup_rt_frame",
				   (long)frame, regs->nip, regs->link);

	return 1;
}
/*
 * Handle rt_sigreturn(): tear down the signal frame on the user stack,
 * restoring the blocked-signal mask, the register context (recheckpointing
 * a transaction when the saved MSR shows one active), and the alternate
 * signal stack.
 *
 * r3..r8 are the unused syscall argument registers; the frame address is
 * taken from the user stack pointer (regs->gpr[1]).
 */
int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
		     unsigned long r6, unsigned long r7, unsigned long r8,
		     struct pt_regs *regs)
{
	/* The ucontext sits at the user stack pointer. */
	struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	unsigned long msr;
#endif

	BUG_ON(current->thread.regs != regs);

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
		goto badframe;

	if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
		goto badframe;
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and this includes any transactional state created
	 * within it. We only check for suspended, as we can never be
	 * active in the kernel; if we are active, there is nothing better
	 * to do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr)) {
		/* We recheckpoint on return. */
		struct ucontext __user *uc_transact;

		/* The transactional context hangs off uc_link. */
		if (__get_user(uc_transact, &uc->uc_link))
			goto badframe;
		if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
					   &uc_transact->uc_mcontext))
			goto badframe;
	}
	else
	/* Fall through, for non-TM restore */
#endif
	if (restore_sigcontext(current, NULL, 1, &uc->uc_mcontext))
		goto badframe;

	if (restore_altstack(&uc->uc_stack))
		goto badframe;

	/* Full register state is reloaded on the way back to userspace. */
	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
				   current->comm, current->pid, "rt_sigreturn",
				   (long)uc, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * Fix up and finalize the GP registers fetched from a seized task, then
 * collect its FPU/Altivec/VSX (and, if needed, TM) state via ptrace.
 *
 * Returns 0 on success, -1 if any register class could not be fetched.
 */
static int __get_task_regs(pid_t pid, user_regs_struct_t *regs,
			   user_fpregs_struct_t *fpregs)
{
	pr_info("Dumping GP/FPU registers for %d\n", pid);

	/*
	 * This is inspired by kernel function check_syscall_restart in
	 * arch/powerpc/kernel/signal.c
	 */
#ifndef TRAP
#define TRAP(r)              ((r).trap & ~0xF)
#endif

	/* Trap 0x0C00 is a system call; the summary-overflow bit in CCR
	 * marks an error return, i.e. gpr[3] holds a (positive) errno. */
	if (TRAP(*regs) == 0x0C00 && regs->ccr & 0x10000000) {
		/* Restart the system call */
		switch (regs->gpr[3]) {
		case ERESTARTNOHAND:
		case ERESTARTSYS:
		case ERESTARTNOINTR:
			/* Re-issue the same syscall: restore the first
			 * argument and back nip up over the sc instruction. */
			regs->gpr[3] = regs->orig_gpr3;
			regs->nip -= 4;
			break;
		case ERESTART_RESTARTBLOCK:
			regs->gpr[0] = __NR_restart_syscall;
			regs->nip -= 4;
			break;
		}
	}

	/* Resetting trap since we are now coming from user space. */
	regs->trap = 0;

	fpregs->flags = 0;
	/*
	 * Check for Transactional Memory operation in progress.
	 * Until we have support of TM register's state through the ptrace API,
	 * we can't checkpoint process with TM operation in progress (almost
	 * impossible) or suspended (easy to get).
	 */
	if (MSR_TM_ACTIVE(regs->msr)) {
		pr_debug("Task %d has %s TM operation at 0x%lx\n",
			 pid,
			 (regs->msr & MSR_TMS) ? "a suspended" : "an active",
			 regs->nip);
		if (get_tm_regs(pid, fpregs))
			return -1;
		fpregs->flags = USER_FPREGS_FL_TM;
	}

	if (get_fpu_regs(pid, fpregs))
		return -1;

	if (get_altivec_regs(pid, fpregs))
		return -1;

	if (fpregs->flags & USER_FPREGS_FL_ALTIVEC) {
		/*
		 * Save the VSX registers if Altivec registers are supported
		 */
		if (get_vsx_regs(pid, fpregs))
			return -1;
	}

	return 0;
}
/* * As above, but Transactional Memory is in use, so deliver sigcontexts * containing checkpointed and transactional register states. * * To do this, we treclaim (done before entering here) to gather both sets of * registers and set up the 'normal' sigcontext registers with rolled-back * register values such that a simple signal handler sees a correct * checkpointed register state. If interested, a TM-aware sighandler can * examine the transactional registers in the 2nd sigcontext to determine the * real origin of the signal. */ static long setup_tm_sigcontexts(struct sigcontext __user *sc, struct sigcontext __user *tm_sc, struct pt_regs *regs, int signr, sigset_t *set, unsigned long handler) { /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the * process never used altivec yet (MSR_VEC is zero in pt_regs of * the context). This is very important because we must ensure we * don't lose the VRSAVE content that may have been set prior to * the process doing its first vector operation * Userland shall check AT_HWCAP to know wether it can rely on the * v_regs pointer or not. */ #ifdef CONFIG_ALTIVEC elf_vrreg_t __user *v_regs = (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful); elf_vrreg_t __user *tm_v_regs = (elf_vrreg_t __user *) (((unsigned long)tm_sc->vmx_reserve + 15) & ~0xful); #endif unsigned long msr = regs->msr; long err = 0; BUG_ON(!MSR_TM_ACTIVE(regs->msr)); /* Remove TM bits from thread's MSR. The MSR in the sigcontext * just indicates to userland that we were doing a transaction, but we * don't want to return in transactional state. This also ensures * that flush_fp_to_thread won't set TIF_RESTORE_TM again. 
*/ regs->msr &= ~MSR_TS_MASK; flush_fp_to_thread(current); #ifdef CONFIG_ALTIVEC err |= __put_user(v_regs, &sc->v_regs); err |= __put_user(tm_v_regs, &tm_sc->v_regs); /* save altivec registers */ if (current->thread.used_vr) { flush_altivec_to_thread(current); /* Copy 33 vec registers (vr0..31 and vscr) to the stack */ err |= __copy_to_user(v_regs, ¤t->thread.vr_state, 33 * sizeof(vector128)); /* If VEC was enabled there are transactional VRs valid too, * else they're a copy of the checkpointed VRs. */ if (msr & MSR_VEC) err |= __copy_to_user(tm_v_regs, ¤t->thread.transact_vr, 33 * sizeof(vector128)); else err |= __copy_to_user(tm_v_regs, ¤t->thread.vr_state, 33 * sizeof(vector128)); /* set MSR_VEC in the MSR value in the frame to indicate * that sc->v_reg contains valid data. */ msr |= MSR_VEC; } /* We always copy to/from vrsave, it's 0 if we don't have or don't * use altivec. */ if (cpu_has_feature(CPU_FTR_ALTIVEC)) current->thread.vrsave = mfspr(SPRN_VRSAVE); err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]); if (msr & MSR_VEC) err |= __put_user(current->thread.transact_vrsave, (u32 __user *)&tm_v_regs[33]); else err |= __put_user(current->thread.vrsave, (u32 __user *)&tm_v_regs[33]); #else /* CONFIG_ALTIVEC */ err |= __put_user(0, &sc->v_regs); err |= __put_user(0, &tm_sc->v_regs); #endif /* CONFIG_ALTIVEC */ /* copy fpr regs and fpscr */ err |= c