/*
 * cpu_setmcontext: (sh3)
 *
 *	Install the machine context `mcp' into LWP `l'.  Which parts are
 *	restored is selected by `flags':
 *	    _UC_CPU      - general-purpose register state (validated first)
 *	    _UC_TLSBASE  - thread-private pointer (GBR on sh3)
 *	    _UC_SETSTACK / _UC_CLRSTACK - signal-stack bookkeeping
 *
 *	Returns 0 on success, or the error from cpu_mcontext_validate()
 *	if the supplied context fails the security checks.
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		/* Check for security violations. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/*
		 * Copy the saved user register set into the trapframe.
		 * Note that PC/SR map to the sh3 "saved" registers
		 * tf_spc/tf_ssr; the mcontext values land there and take
		 * effect on return to userland.
		 * NOTE(review): SR is taken from the user-supplied context;
		 * presumably cpu_mcontext_validate() rejects privileged SR
		 * bits -- confirm in the MD validate routine.
		 */
		/* done in lwp_setprivate */
		/* tf->tf_gbr = gr[_REG_GBR]; */
		tf->tf_spc = gr[_REG_PC];
		tf->tf_ssr = gr[_REG_SR];
		tf->tf_macl = gr[_REG_MACL];
		tf->tf_mach = gr[_REG_MACH];
		tf->tf_pr = gr[_REG_PR];
		tf->tf_r14 = gr[_REG_R14];
		tf->tf_r13 = gr[_REG_R13];
		tf->tf_r12 = gr[_REG_R12];
		tf->tf_r11 = gr[_REG_R11];
		tf->tf_r10 = gr[_REG_R10];
		tf->tf_r9 = gr[_REG_R9];
		tf->tf_r8 = gr[_REG_R8];
		tf->tf_r7 = gr[_REG_R7];
		tf->tf_r6 = gr[_REG_R6];
		tf->tf_r5 = gr[_REG_R5];
		tf->tf_r4 = gr[_REG_R4];
		tf->tf_r3 = gr[_REG_R3];
		tf->tf_r2 = gr[_REG_R2];
		tf->tf_r1 = gr[_REG_R1];
		tf->tf_r0 = gr[_REG_R0];
		tf->tf_r15 = gr[_REG_R15];	/* user stack pointer */

		/*
		 * GBR doubles as the TLS base on sh3; route it through
		 * lwp_setprivate() rather than writing the frame directly
		 * (see the commented-out tf_gbr store above).
		 */
		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_GBR]);
	}

#if 0
	/* XXX: FPU context is currently not handled by the kernel. */
	if (flags & _UC_FPU) {
		/* TODO */;
	}
#endif

	/* Update signal-stack state under the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
/*
 * sys__lwp_create: create a new LWP in the current process, running
 * with the user-supplied ucontext.  On success the new LWP's id is
 * copied out to the caller.
 */
int
sys__lwp_create(struct lwp *l, const struct sys__lwp_create_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(const ucontext_t *) ucp;
		syscallarg(u_long) flags;
		syscallarg(lwpid_t *) new_lwp;
	} */
	struct proc * const p = l->l_proc;
	ucontext_t *uc;
	lwpid_t lid;
	int error;

	uc = kmem_alloc(sizeof(*uc), KM_SLEEP);

	/*
	 * Fetch the context; the emulation may use a smaller ucontext,
	 * hence e_ucsize rather than sizeof(ucontext_t).
	 */
	error = copyin(SCARG(uap, ucp), uc, p->p_emul->e_ucsize);

	/* Validate the ucontext: CPU state must be present and sane. */
	if (error == 0) {
		if ((uc->uc_flags & _UC_CPU) == 0)
			error = EINVAL;
		else
			error = cpu_mcontext_validate(l, &uc->uc_mcontext);
	}

	if (error == 0)
		error = do_lwp_create(l, uc, SCARG(uap, flags), &lid);

	if (error != 0) {
		kmem_free(uc, sizeof(*uc));
		return error;
	}

	/*
	 * Ownership of the ucontext has passed to the new LWP, which
	 * will actually run and access it -- do not free it here even
	 * if the copyout of the new lid fails.
	 */
	return copyout(&lid, SCARG(uap, new_lwp), sizeof(lid));
}
/*
 * cpu_setmcontext: (hppa)
 *
 *	Install the machine context `mcp' into LWP `l', as selected by
 *	`flags' (_UC_CPU general registers, _UC_TLSBASE thread pointer,
 *	_UC_FPU floating point, _UC_SETSTACK/_UC_CLRSTACK signal stack).
 *
 *	Returns 0 on success, or the error from cpu_mcontext_validate().
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	struct proc *p = l->l_proc;
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
	const __greg_t *gr = mcp->__gregs;
	int error;

	if ((flags & _UC_CPU) != 0) {
		/* Reject contexts that fail the MD security checks. */
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/*
		 * gr[0] carries the PSW.  On PA2.0 CPUs additionally
		 * force the PSW_O bit (ordered-memory-access model).
		 */
		tf->tf_ipsw = gr[0] |
		    (hppa_cpu_ispa20_p() ? PSW_O : 0);
		tf->tf_r1 = gr[1];
		tf->tf_rp = gr[2];	/* return pointer */
		tf->tf_r3 = gr[3];
		tf->tf_r4 = gr[4];
		tf->tf_r5 = gr[5];
		tf->tf_r6 = gr[6];
		tf->tf_r7 = gr[7];
		tf->tf_r8 = gr[8];
		tf->tf_r9 = gr[9];
		tf->tf_r10 = gr[10];
		tf->tf_r11 = gr[11];
		tf->tf_r12 = gr[12];
		tf->tf_r13 = gr[13];
		tf->tf_r14 = gr[14];
		tf->tf_r15 = gr[15];
		tf->tf_r16 = gr[16];
		tf->tf_r17 = gr[17];
		tf->tf_r18 = gr[18];
		tf->tf_t4 = gr[19];	/* caller-saved temporaries */
		tf->tf_t3 = gr[20];
		tf->tf_t2 = gr[21];
		tf->tf_t1 = gr[22];
		tf->tf_arg3 = gr[23];	/* argument registers */
		tf->tf_arg2 = gr[24];
		tf->tf_arg1 = gr[25];
		tf->tf_arg0 = gr[26];
		tf->tf_dp = gr[27];	/* global data pointer */
		tf->tf_ret0 = gr[28];	/* return value registers */
		tf->tf_ret1 = gr[29];
		tf->tf_sp = gr[30];	/* stack pointer */
		tf->tf_r31 = gr[31];
		tf->tf_sar = gr[_REG_SAR];
		/*
		 * Instruction address queue: the space (sid) halves are
		 * recomputed from the process's pmap for the given offsets
		 * rather than trusted from user-supplied values.
		 */
		tf->tf_iisq_head = pmap_sid(pmap, gr[_REG_PCOQH]);
		tf->tf_iisq_tail = pmap_sid(pmap, gr[_REG_PCOQT]);
		tf->tf_iioq_head = gr[_REG_PCOQH];
		tf->tf_iioq_tail = gr[_REG_PCOQT];

		/*
		 * Fix up the privilege level encoded in the low bits of
		 * the IA queue offsets: addresses at or above 0xc0000020
		 * get the privilege bits cleared, anything else is forced
		 * to user privilege.
		 * NOTE(review): 0xc0000020 presumably marks the end of the
		 * syscall gateway page, where kernel privilege is
		 * legitimate -- confirm against the platform's gateway
		 * layout.
		 */
		if (tf->tf_iioq_head >= 0xc0000020) {
			tf->tf_iioq_head &= ~HPPA_PC_PRIV_MASK;
		} else {
			tf->tf_iioq_head |= HPPA_PC_PRIV_USER;
		}
		if (tf->tf_iioq_tail >= 0xc0000020) {
			tf->tf_iioq_tail &= ~HPPA_PC_PRIV_MASK;
		} else {
			tf->tf_iioq_tail |= HPPA_PC_PRIV_USER;
		}

#if 0
		/* Space and control registers are not restored. */
		tf->tf_sr0 = gr[_REG_SR0];
		tf->tf_sr1 = gr[_REG_SR1];
		tf->tf_sr2 = gr[_REG_SR2];
		tf->tf_sr3 = gr[_REG_SR3];
		tf->tf_sr4 = gr[_REG_SR4];
		tf->tf_cr26 = gr[_REG_CR26];
#endif
	}

	/* Restore the private thread context (cr27 is the TLS pointer). */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(uintptr_t)gr[_REG_CR27]);
		tf->tf_cr27 = gr[_REG_CR27];
	}

	/*
	 * Restore the floating point registers: flush any live FPU state
	 * for this LWP first so the copied-in values are not overwritten
	 * by a later save of stale hardware registers.
	 */
	if ((flags & _UC_FPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		hppa_fpu_flush(l);
		memcpy(pcb->pcb_fpregs, &mcp->__fpregs,
		    sizeof(mcp->__fpregs));
	}

	/* Update signal-stack state under the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return 0;
}
/*
 * cpu_setmcontext: (mips)
 *
 *	Install the machine context `mcp' into LWP `l', as selected by
 *	`flags' (_UC_CPU general registers, _UC_TLSBASE thread pointer,
 *	_UC_FPU floating point, _UC_SETSTACK/_UC_CLRSTACK signal stack).
 *
 *	Returns 0 on success, or the error from cpu_mcontext_validate().
 */
int
cpu_setmcontext(struct lwp *l, const mcontext_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_utf;
	struct proc *p = l->l_proc;
	const __greg_t *gr = mcp->__gregs;
	int error;

	/* Restore register context, if any. */
	if (flags & _UC_CPU) {
		error = cpu_mcontext_validate(l, mcp);
		if (error)
			return error;

		/* Save register context. */
#ifdef __mips_n32
		CTASSERT(_R_AST == _REG_AT);
		/*
		 * An o32 process on an n32 kernel supplies 32-bit gregs;
		 * widen them one at a time instead of block-copying.
		 */
		if (__predict_false(p->p_md.md_abi == _MIPS_BSD_API_O32)) {
			const mcontext_o32_t *mcp32 =
			    (const mcontext_o32_t *)mcp;
			const __greg32_t *gr32 = mcp32->__gregs;
			for (size_t i = _R_AST; i < 32; i++) {
				tf->tf_regs[i] = gr32[i];
			}
		} else
#endif
		/*
		 * Copy the 31 general registers starting at AT ($1);
		 * $0 is hardwired zero and has no trapframe slot to fill.
		 */
		memcpy(&tf->tf_regs[_R_AST], &gr[_REG_AT],
		    sizeof(mips_reg_t) * 31);

		tf->tf_regs[_R_MULLO] = gr[_REG_MDLO];
		tf->tf_regs[_R_MULHI] = gr[_REG_MDHI];
		tf->tf_regs[_R_CAUSE] = gr[_REG_CAUSE];
		tf->tf_regs[_R_PC] = gr[_REG_EPC];
		/* Do not restore SR. */
	}

	/* Restore the private thread context */
	if (flags & _UC_TLSBASE) {
		lwp_setprivate(l, (void *)(intptr_t)mcp->_mc_tlsbase);
	}

	/* Restore floating point register context, if any. */
	if (flags & _UC_FPU) {
		size_t fplen;

		/*
		 * Disable the FPU contents so stale hardware registers
		 * cannot be saved over the values copied in below.
		 */
		fpu_discard();

		/*
		 * The in-kernel fpreg layout depends on the process ABI:
		 * new-ABI (n32/n64) processes use struct fpreg, o32 uses
		 * the smaller struct fpreg_oabi.  On an o32-only kernel
		 * there is no choice to make.
		 */
#if !defined(__mips_o32)
		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
#endif
			fplen = sizeof(struct fpreg);
#if !defined(__mips_o32)
		} else {
			fplen = sizeof(struct fpreg_oabi);
		}
#endif
		/*
		 * The PCB FP regs struct includes the FP CSR, so use the
		 * proper size of fpreg when copying.
		 */
		struct pcb * const pcb = lwp_getpcb(l);
		memcpy(&pcb->pcb_fpregs, &mcp->__fpregs, fplen);
	}

	/* Update signal-stack state under the proc lock. */
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}