void
cpu_lwp_free(struct lwp *l, int proc)
{

	if (l->l_md.md_fpstate != NULL)
		fpusave_lwp(l, false);
}
/*
 * 32-bit version of cpu_coredump.
 */
int
cpu_coredump32(struct lwp *l, struct coredump_iostate *iocookie,
    struct core32 *chdr)
{
	int i, error;
	struct md_coredump32 md_core;
	struct coreseg32 cseg;

	if (iocookie == NULL) {
		CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
		chdr->c_hdrsize = ALIGN(sizeof(*chdr));
		chdr->c_seghdrsize = ALIGN(sizeof(cseg));
		chdr->c_cpusize = sizeof(md_core);
		chdr->c_nseg++;
		return 0;
	}

	/* Fake a v8 trapframe */
	md_core.md_tf.tf_psr = TSTATECCR_TO_PSR(l->l_md.md_tf->tf_tstate);
	md_core.md_tf.tf_pc = l->l_md.md_tf->tf_pc;
	md_core.md_tf.tf_npc = l->l_md.md_tf->tf_npc;
	md_core.md_tf.tf_y = l->l_md.md_tf->tf_y;
	for (i = 0; i < 8; i++) {
		md_core.md_tf.tf_global[i] = l->l_md.md_tf->tf_global[i];
		md_core.md_tf.tf_out[i] = l->l_md.md_tf->tf_out[i];
	}

	if (l->l_md.md_fpstate) {
		fpusave_lwp(l, true);
		/* Copy individual fields */
		for (i = 0; i < 32; i++)
			md_core.md_fpstate.fs_regs[i] =
			    l->l_md.md_fpstate->fs_regs[i];
		md_core.md_fpstate.fs_fsr = l->l_md.md_fpstate->fs_fsr;
		i = md_core.md_fpstate.fs_qsize =
		    l->l_md.md_fpstate->fs_qsize;	/* Should always be zero */
		while (i--)
			md_core.md_fpstate.fs_queue[i] =
			    l->l_md.md_fpstate->fs_queue[i];
	} else
		memset(&md_core.md_fpstate, 0, sizeof(md_core.md_fpstate));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
	    chdr->c_seghdrsize);
	if (error)
		return error;

	return coredump_write(iocookie, UIO_SYSSPACE, &md_core,
	    sizeof(md_core));
}
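/*
 * Illustrative sketch (not the kernel's definitions, which live in the
 * sparc64 psl.h headers): TSTATECCR_TO_PSR() above, and its inverse
 * PSRCC_TO_TSTATE() used by cpu_setmcontext32() later in this section,
 * move the integer condition codes between the v9 %tstate CCR field and
 * the v8 %psr.  The field positions used here -- icc at %tstate bits
 * 32..35 and at %psr bits 20..23 -- are assumptions for illustration.
 */
#define EX_TSTATE_ICC_SHIFT	32	/* assumed icc position in %tstate */
#define EX_PSR_ICC_SHIFT	20	/* assumed icc position in %psr */

static inline uint32_t
ex_tstateccr_to_psr(uint64_t tstate)
{
	/* Pull the 4 icc bits out of the CCR, drop them into the v8 slot. */
	return (uint32_t)(((tstate >> EX_TSTATE_ICC_SHIFT) & 0xf) <<
	    EX_PSR_ICC_SHIFT);
}

static inline uint64_t
ex_psrcc_to_tstate(uint32_t psr)
{
	/* Inverse direction: lift v8 icc back into the %tstate CCR field. */
	return (uint64_t)((psr >> EX_PSR_ICC_SHIFT) & 0xf) <<
	    EX_TSTATE_ICC_SHIFT;
}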
/*
 * cpu_lwp_free is called from exit() to let machine-dependent
 * code free machine-dependent resources.  Note that this routine
 * must not block.
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{

	/* If we were using the FPU, forget about it. */
	fpusave_lwp(l, false);

#ifdef MTRR
	if (proc && l->l_proc->p_md.md_flags & MDP_USEDMTRR)
		mtrr_clean(l->l_proc);
#endif
}
void
linux32_setregs(struct lwp *l, struct exec_package *pack, u_long stack)
{
	struct pcb *pcb = lwp_getpcb(l);
	struct trapframe *tf;
	struct proc *p = l->l_proc;

	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL)
		fpusave_lwp(l, false);

#if defined(USER_LDT) && 0
	pmap_ldt_cleanup(p);
#endif

	netbsd32_adjust_limits(p);

	l->l_md.md_flags &= ~MDL_USEDFPU;
	l->l_md.md_flags |= MDL_COMPAT32;	/* Forces iret not sysret */
	pcb->pcb_flags = PCB_COMPAT32;
	pcb->pcb_savefpu.fp_fxsave.fx_fcw = __Linux_NPXCW__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr_mask = __INITIAL_MXCSR_MASK__;

	p->p_flag |= PK_32;

	tf = l->l_md.md_regs;
	tf->tf_rax = 0;
	tf->tf_rbx = (u_int32_t)p->p_psstrp;
	tf->tf_rcx = pack->ep_entry & 0xffffffff;
	tf->tf_rdx = 0;
	tf->tf_rsi = 0;
	tf->tf_rdi = 0;
	tf->tf_rbp = 0;
	tf->tf_rsp = stack & 0xffffffff;
	tf->tf_r8 = 0;
	tf->tf_r9 = 0;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_r15 = 0;
	tf->tf_rip = pack->ep_entry & 0xffffffff;
	tf->tf_rflags = PSL_USERSET;
	tf->tf_cs = GSEL(GUCODE32_SEL, SEL_UPL);
	tf->tf_ss = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_ds = GSEL(GUDATA32_SEL, SEL_UPL);
	tf->tf_es = GSEL(GUDATA32_SEL, SEL_UPL);
	cpu_fsgs_zero(l);
	cpu_fsgs_reload(l, GSEL(GUDATA32_SEL, SEL_UPL),
	    GSEL(GUDATA32_SEL, SEL_UPL));
}
/* ARGSUSED */
void
netbsd32_setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct proc *p = l->l_proc;
	struct trapframe64 *tf = l->l_md.md_tf;
	struct fpstate64 *fs;
	int64_t tstate;

	/* Don't allow misaligned code by default */
	p->p_md.md_flags &= ~MDP_FIXALIGN;

	/* Mark this as a 32-bit emulation */
	p->p_flag |= PK_32;

	netbsd32_adjust_limits(p);

	/* Setup the ev_out32 hook */
#if NFIRM_EVENTS > 0
	if (ev_out32_hook == NULL)
		ev_out32_hook = ev_out32;
#endif

	/*
	 * Set the registers to 0 except for:
	 *	%o6: stack pointer, built in exec()
	 *	%tstate: (retain icc, xcc and cwp bits)
	 *	%g1: p->p_psstrp (used by crt0)
	 *	%tpc,%tnpc: entry point of program
	 */
	tstate = ((PSTATE_USER32) << TSTATE_PSTATE_SHIFT) |
	    (tf->tf_tstate & TSTATE_CWP);
	if ((fs = l->l_md.md_fpstate) != NULL) {
		/*
		 * We hold an FPU state.  If we own *the* FPU chip state
		 * we must get rid of it, and the only way to do that is
		 * to save it.  In any case, get rid of our FPU state.
		 */
		fpusave_lwp(l, false);
		pool_cache_put(fpstate_cache, fs);
		l->l_md.md_fpstate = NULL;
	}
	memset(tf, 0, sizeof *tf);
	tf->tf_tstate = tstate;
	tf->tf_global[1] = p->p_psstrp;
	tf->tf_pc = pack->ep_entry & ~3;
	tf->tf_npc = tf->tf_pc + 4;

	stack -= sizeof(struct rwindow32);
	tf->tf_out[6] = stack;
	tf->tf_out[7] = 0;
}
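/*
 * The final `stack -= sizeof(struct rwindow32)' above reserves the
 * register-window save area that the 32-bit SPARC ABI requires at every
 * valid %sp: a window spill writes 8 locals and 8 ins to the stack
 * pointer.  A sketch of that layout with a hypothetical definition (the
 * real struct rwindow32 lives in the netbsd32 machine headers):
 */
struct ex_rwindow32 {
	uint32_t rw_local[8];		/* %l0..%l7 on spill */
	uint32_t rw_in[8];		/* %i0..%i7 on spill */
};
/*
 * The freshly exec'd process thus starts with %sp 64 bytes below the
 * argument block, so an early window spill cannot clobber the
 * arguments crt0 still needs.
 */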
void
netbsd32_setregs(struct lwp *l, struct exec_package *pack, vaddr_t stack)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct proc *p = l->l_proc;

	pcb = lwp_getpcb(l);

	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL)
		fpusave_lwp(l, false);

#if defined(USER_LDT) && 0
	pmap_ldt_cleanup(p);
#endif

	netbsd32_adjust_limits(p);

	l->l_md.md_flags &= ~MDL_USEDFPU;
	l->l_md.md_flags |= MDL_COMPAT32;	/* Force iret not sysret */
	pcb->pcb_flags = PCB_COMPAT32;
	if (pack->ep_osversion >= 699002600)
		pcb->pcb_savefpu.fp_fxsave.fx_fcw = __NetBSD_NPXCW__;
	else
		pcb->pcb_savefpu.fp_fxsave.fx_fcw = __NetBSD_COMPAT_NPXCW__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr_mask = __INITIAL_MXCSR_MASK__;

	p->p_flag |= PK_32;

	tf = l->l_md.md_regs;
	tf->tf_ds = LSEL(LUDATA32_SEL, SEL_UPL);
	tf->tf_es = LSEL(LUDATA32_SEL, SEL_UPL);
	cpu_fsgs_zero(l);
	cpu_fsgs_reload(l, tf->tf_ds, tf->tf_es);
	tf->tf_rdi = 0;
	tf->tf_rsi = 0;
	tf->tf_rbp = 0;
	tf->tf_rbx = (uint32_t)p->p_psstrp;
	tf->tf_rdx = 0;
	tf->tf_rcx = 0;
	tf->tf_rax = 0;
	tf->tf_rip = pack->ep_entry;
	tf->tf_cs = LSEL(LUCODE32_SEL, SEL_UPL);
	tf->tf_rflags = PSL_USERSET;
	tf->tf_rsp = stack;
	tf->tf_ss = LSEL(LUDATA32_SEL, SEL_UPL);
}
void
cpu_getmcontext32(struct lwp *l, mcontext32_t *mcp, unsigned int *flags)
{
	const struct trapframe *tf = l->l_md.md_regs;
	__greg32_t *gr = mcp->__gregs;
	__greg32_t ras_eip;

	/* Save register context. */
	gr[_REG32_GS] = tf->tf_gs;
	gr[_REG32_FS] = tf->tf_fs;
	gr[_REG32_ES] = tf->tf_es;
	gr[_REG32_DS] = tf->tf_ds;
	gr[_REG32_EFL] = tf->tf_rflags;
	gr[_REG32_EDI] = tf->tf_rdi;
	gr[_REG32_ESI] = tf->tf_rsi;
	gr[_REG32_EBP] = tf->tf_rbp;
	gr[_REG32_EBX] = tf->tf_rbx;
	gr[_REG32_EDX] = tf->tf_rdx;
	gr[_REG32_ECX] = tf->tf_rcx;
	gr[_REG32_EAX] = tf->tf_rax;
	gr[_REG32_EIP] = tf->tf_rip;
	gr[_REG32_CS] = tf->tf_cs;
	gr[_REG32_ESP] = tf->tf_rsp;
	gr[_REG32_UESP] = tf->tf_rsp;
	gr[_REG32_SS] = tf->tf_ss;
	gr[_REG32_TRAPNO] = tf->tf_trapno;
	gr[_REG32_ERR] = tf->tf_err;

	if ((ras_eip = (__greg32_t)(uintptr_t)ras_lookup(l->l_proc,
	    (void *)(uintptr_t)gr[_REG32_EIP])) != -1)
		gr[_REG32_EIP] = ras_eip;

	*flags |= _UC_CPU;

	mcp->_mc_tlsbase = (uint32_t)(uintptr_t)l->l_private;
	*flags |= _UC_TLSBASE;

	/* Save floating point register context, if any. */
	if ((l->l_md.md_flags & MDL_USEDFPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		if (pcb->pcb_fpcpu) {
			fpusave_lwp(l, true);
		}
		memcpy(&mcp->__fpregs, &pcb->pcb_savefpu.fp_fxsave,
		    sizeof(pcb->pcb_savefpu.fp_fxsave));
		*flags |= _UC_FPU;
	}
}
/*
 * cpu_lwp_free is called from exit() to let machine-dependent
 * code free machine-dependent resources.  Note that this routine
 * must not block.
 */
void
cpu_lwp_free(struct lwp *l, int proc)
{
	struct pcb *pcb = lwp_getpcb(l);

	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL) {
		fpusave_lwp(l, false);
	}

#ifdef MTRR
	if (proc && l->l_proc->p_md.md_flags & MDP_USEDMTRR)
		mtrr_clean(l->l_proc);
#endif
}
void
linux_setregs(struct lwp *l, struct exec_package *epp, u_long stack)
{
	struct pcb *pcb = &l->l_addr->u_pcb;
	struct trapframe *tf;

	/* If we were using the FPU, forget about it. */
	if (pcb->pcb_fpcpu != NULL)
		fpusave_lwp(l, false);

	l->l_md.md_flags &= ~MDP_USEDFPU;
	pcb->pcb_flags = 0;
	pcb->pcb_savefpu.fp_fxsave.fx_fcw = __NetBSD_NPXCW__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr = __INITIAL_MXCSR__;
	pcb->pcb_savefpu.fp_fxsave.fx_mxcsr_mask = __INITIAL_MXCSR_MASK__;
	pcb->pcb_fs = 0;
	pcb->pcb_gs = 0;

	l->l_proc->p_flag &= ~PK_32;

	tf = l->l_md.md_regs;
	tf->tf_rax = 0;
	tf->tf_rbx = 0;
	tf->tf_rcx = epp->ep_entry;
	tf->tf_rdx = 0;
	tf->tf_rsi = 0;
	tf->tf_rdi = 0;
	tf->tf_rbp = 0;
	tf->tf_rsp = stack;
	tf->tf_r8 = 0;
	tf->tf_r9 = 0;
	tf->tf_r10 = 0;
	tf->tf_r11 = 0;
	tf->tf_r12 = 0;
	tf->tf_r13 = 0;
	tf->tf_r14 = 0;
	tf->tf_r15 = 0;
	tf->tf_rip = epp->ep_entry;
	tf->tf_rflags = PSL_USERSET;
	tf->tf_cs = GSEL(GUCODE_SEL, SEL_UPL);
	tf->tf_ss = GSEL(GUDATA_SEL, SEL_UPL);
	tf->tf_ds = 0;
	tf->tf_es = 0;
	tf->tf_fs = 0;
	tf->tf_gs = 0;
}
int
cpu_setmcontext32(struct lwp *l, const mcontext32_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_tf;
	const __greg32_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
	}

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		error = cpu_mcontext32_validate(l, mcp);
		if (error)
			return error;

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG32_PSR]);
		tf->tf_pc = (uint64_t)gr[_REG32_PC];
		tf->tf_npc = (uint64_t)gr[_REG32_nPC];
		tf->tf_y = (uint64_t)gr[_REG32_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG32_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG32_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG32_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG32_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG32_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG32_G6];
		/* done in lwp_setprivate */
		/* tf->tf_global[7] = (uint64_t)gr[_REG32_G7]; */
		tf->tf_out[0] = (uint64_t)gr[_REG32_O0];
		tf->tf_out[1] = (uint64_t)gr[_REG32_O1];
		tf->tf_out[2] = (uint64_t)gr[_REG32_O2];
		tf->tf_out[3] = (uint64_t)gr[_REG32_O3];
		tf->tf_out[4] = (uint64_t)gr[_REG32_O4];
		tf->tf_out[5] = (uint64_t)gr[_REG32_O5];
		tf->tf_out[6] = (uint64_t)gr[_REG32_O6];
		tf->tf_out[7] = (uint64_t)gr[_REG32_O7];
		/* %asi restored above; %fprs not yet supported. */

		if (flags & _UC_TLSBASE)
			lwp_setprivate(l, (void *)(uintptr_t)gr[_REG32_G7]);

		/* XXX mcp->__gwins */
	}

	/* Restore floating point register context, if any. */
	if ((flags & _UC_FPU) != 0) {
#ifdef notyet
		struct fpstate64 *fsp;
		const __fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, &fpr->__fpu_fr, sizeof(fpr->__fpu_fr));
		fsp->fs_fsr = mcp->__fpregs.__fpu_fsr;
		fsp->fs_qsize = 0;
#if 0	/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fs.fs_qsize*/;	/* See above */
#endif
#endif
	}

#ifdef _UC_SETSTACK
	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);
#endif

	return (0);
}
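/*
 * cpu_mcontext32_validate() is not shown here.  A hedged sketch of what
 * it minimally enforces, assuming it matches the inline check done by
 * the NOT_YET netbsd32_cpu_setmcontext() later in this section (%pc and
 * %npc nonzero and word-aligned); the real validator may check more:
 */
static int
ex_mcontext32_validate(const mcontext32_t *mcp)
{
	const __greg32_t *gr = mcp->__gregs;

	if (((gr[_REG32_PC] | gr[_REG32_nPC]) & 3) != 0 ||
	    gr[_REG32_PC] == 0 || gr[_REG32_nPC] == 0)
		return EINVAL;
	return 0;
}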
/*
 * cpu_lwp_fork: finish a new LWP (l2) operation.
 *
 * First LWP (l1) is the process being forked.  If it is &lwp0, then we
 * are creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb1, *pcb2;
	struct trapframe *tf;
	struct switchframe *sf;
	vaddr_t uv;

	pcb1 = lwp_getpcb(l1);
	pcb2 = lwp_getpcb(l2);

	/*
	 * If parent LWP was using FPU, then we have to save the FPU h/w
	 * state to PCB so that we can copy it.
	 */
	if (pcb1->pcb_fpcpu != NULL) {
		fpusave_lwp(l1, true);
	}

	/*
	 * Sync the PCB before we copy it.
	 */
	if (l1 == curlwp) {
		KASSERT(pcb1 == curpcb);
		savectx(pcb1);
	} else {
		KASSERT(l1 == &lwp0);
	}

	/* Copy the PCB from parent. */
	memcpy(pcb2, pcb1, sizeof(struct pcb));

#if defined(XEN)
	pcb2->pcb_iopl = SEL_KPL;
#endif

	/*
	 * Set the kernel stack address (from the address to uarea) and
	 * trapframe address for child.
	 *
	 * Rig kernel stack so that it would start out in lwp_trampoline()
	 * and call child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with a
	 * parent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */
	uv = uvm_lwp_getuarea(l2);

#ifdef __x86_64__
	pcb2->pcb_rsp0 = (uv + KSTACK_SIZE - 16) & ~0xf;
	tf = (struct trapframe *)pcb2->pcb_rsp0 - 1;
#else
	pcb2->pcb_esp0 = (uv + KSTACK_SIZE - 16);
	tf = (struct trapframe *)pcb2->pcb_esp0 - 1;
	pcb2->pcb_iomap = NULL;
#endif
	l2->l_md.md_regs = tf;

	/*
	 * Copy the trapframe from parent, so that return to userspace
	 * will be to right address, with correct registers.
	 */
	memcpy(tf, l1->l_md.md_regs, sizeof(struct trapframe));

	/* Child LWP might get aston() before returning to userspace. */
	tf->tf_trapno = T_ASTFLT;

#if 0 /* DIAGNOSTIC */
	/* Set a red zone in the kernel stack after the uarea. */
	pmap_kremove(uv, PAGE_SIZE);
	pmap_update(pmap_kernel());
#endif

	/* If specified, set a different user stack for a child. */
	if (stack != NULL) {
#ifdef __x86_64__
		tf->tf_rsp = (uint64_t)stack + stacksize;
#else
		tf->tf_esp = (uint32_t)stack + stacksize;
#endif
	}

	l2->l_md.md_flags = l1->l_md.md_flags;
	l2->l_md.md_astpending = 0;

	sf = (struct switchframe *)tf - 1;

#ifdef __x86_64__
	sf->sf_r12 = (uint64_t)func;
	sf->sf_r13 = (uint64_t)arg;
	sf->sf_rip = (uint64_t)lwp_trampoline;
	pcb2->pcb_rsp = (uint64_t)sf;
	pcb2->pcb_rbp = (uint64_t)l2;
#else
	sf->sf_esi = (int)func;
	sf->sf_ebx = (int)arg;
	sf->sf_eip = (int)lwp_trampoline;
	pcb2->pcb_esp = (int)sf;
	pcb2->pcb_ebp = (int)l2;
#endif
}
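/*
 * The address arithmetic above stacks a trapframe and then a switchframe
 * at the top of the child's uarea; the first switch to l2 "returns"
 * through sf_rip/sf_eip into lwp_trampoline(), which calls func(arg).
 * A standalone sketch of just the x86_64 pointer math -- the stand-in
 * types, EX_KSTACK_SIZE value and uarea address are made up for
 * illustration:
 */
#include <stdint.h>
#include <stdio.h>

struct ex_trapframe { uint64_t tf_regs[26]; };	/* size stand-in only */
struct ex_switchframe { uint64_t sf_r12, sf_r13, sf_rip; };
#define EX_KSTACK_SIZE	(4 * 4096)		/* assumed stack size */

int
main(void)
{
	uint64_t uv = 0x1000000;		/* pretend uarea base */
	uint64_t rsp0 = (uv + EX_KSTACK_SIZE - 16) & ~(uint64_t)0xf;
	struct ex_trapframe *tf = (struct ex_trapframe *)rsp0 - 1;
	struct ex_switchframe *sf = (struct ex_switchframe *)tf - 1;

	/* sf sits directly below tf, so popping sf "returns" to sf_rip. */
	printf("rsp0=%#llx tf=%p sf=%p\n", (unsigned long long)rsp0,
	    (void *)tf, (void *)sf);
	return 0;
}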
/*
 * Implement device not available (DNA) exception.
 *
 * If we were the last lwp to use the FPU, we can simply return.
 * Otherwise, we save the previous state, if necessary, and restore
 * our last saved state.
 */
void
fpudna(struct cpu_info *ci)
{
	uint16_t cw;
	uint32_t mxcsr;
	struct lwp *l, *fl;
	struct pcb *pcb;
	int s;

	if (ci->ci_fpsaving) {
		/* Recursive trap. */
		x86_enable_intr();
		return;
	}

	/* Lock out IPIs and disable preemption. */
	s = splhigh();
	x86_enable_intr();

	/* Save state on current CPU. */
	l = ci->ci_curlwp;
	pcb = lwp_getpcb(l);
	fl = ci->ci_fpcurlwp;
	if (fl != NULL) {
		/*
		 * It seems we can get here on Xen even if we didn't
		 * switch lwp.  In this case do nothing.
		 */
		if (fl == l) {
			KASSERT(pcb->pcb_fpcpu == ci);
			clts();
			splx(s);
			return;
		}
		KASSERT(fl != l);
		fpusave_cpu(true);
		KASSERT(ci->ci_fpcurlwp == NULL);
	}

	/* Save our state if on a remote CPU. */
	if (pcb->pcb_fpcpu != NULL) {
		/* Explicitly disable preemption before dropping spl. */
		KPREEMPT_DISABLE(l);
		splx(s);
		fpusave_lwp(l, true);
		KASSERT(pcb->pcb_fpcpu == NULL);
		s = splhigh();
		KPREEMPT_ENABLE(l);
	}

	/*
	 * Restore state on this CPU, or initialize.  Ensure that
	 * the entire update is atomic with respect to FPU-sync IPIs.
	 */
	clts();
	ci->ci_fpcurlwp = l;
	pcb->pcb_fpcpu = ci;

	if ((l->l_md.md_flags & MDL_USEDFPU) == 0) {
		fninit();
		cw = pcb->pcb_savefpu.fp_fxsave.fx_fcw;
		fldcw(&cw);
		mxcsr = pcb->pcb_savefpu.fp_fxsave.fx_mxcsr;
		x86_ldmxcsr(&mxcsr);
		l->l_md.md_flags |= MDL_USEDFPU;
	} else {
		/*
		 * AMD FPUs do not restore FIP, FDP and FOP on fxrstor,
		 * leaking another process's execution history.  Clear
		 * them manually.
		 */
		static const double zero = 0.0;
		int status;

		/*
		 * Clear the ES bit in the x87 status word if it is
		 * currently set, in order to avoid causing a fault in
		 * the upcoming load.
		 */
		fnstsw(&status);
		if (status & 0x80)
			fnclex();

		/*
		 * Load the dummy variable into the x87 stack.  This
		 * mangles the x87 stack, but we don't care since we're
		 * about to call fxrstor() anyway.
		 */
		fldummy(&zero);
		fxrstor(&pcb->pcb_savefpu);
	}

	KASSERT(ci == curcpu());
	splx(s);
}
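/*
 * The save/restore dance above maintains a two-way ownership invariant:
 * ci->ci_fpcurlwp names the lwp whose state is live in this CPU's FPU,
 * and pcb->pcb_fpcpu names the CPU currently holding the lwp's state.
 * A minimal userland model of the pairing fpudna() re-establishes --
 * the types and helper here are illustrative, not the kernel's, and the
 * spl/IPI and recursion handling is omitted:
 */
#include <assert.h>
#include <stddef.h>

struct ex_lwp;
struct ex_cpu { struct ex_lwp *fpcurlwp; };	/* ci->ci_fpcurlwp */
struct ex_lwp { struct ex_cpu *fpcpu; };	/* pcb->pcb_fpcpu */

static void
ex_fpudna(struct ex_cpu *ci, struct ex_lwp *l)
{
	/* Evict a different lwp's live state (fpusave_cpu(true)). */
	if (ci->fpcurlwp != NULL && ci->fpcurlwp != l)
		ci->fpcurlwp->fpcpu = NULL;
	/* Pull our state home from a remote CPU (fpusave_lwp(l, true)). */
	if (l->fpcpu != NULL && l->fpcpu != ci)
		l->fpcpu->fpcurlwp = NULL;
	/* Re-establish the pairing before the faulting insn is retried. */
	ci->fpcurlwp = l;
	l->fpcpu = ci;
	assert(ci->fpcurlwp->fpcpu == ci);
}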
/*
 * cpu_coredump is called to write a core dump header.
 */
int
cpu_coredump(struct lwp *l, void *iocookie, struct core *chdr)
{
	int i, error;
	struct md_coredump md_core;
	struct coreseg cseg;

	if (iocookie == NULL) {
		CORE_SETMAGIC(*chdr, COREMAGIC, MID_MACHINE, 0);
		chdr->c_hdrsize = ALIGN(sizeof(*chdr));
		chdr->c_seghdrsize = ALIGN(sizeof(cseg));
		chdr->c_cpusize = sizeof(md_core);
		chdr->c_nseg++;
		return 0;
	}

	/* Copy important fields over. */
	md_core.md_tf.tf_tstate = l->l_md.md_tf->tf_tstate;
	md_core.md_tf.tf_pc = l->l_md.md_tf->tf_pc;
	md_core.md_tf.tf_npc = l->l_md.md_tf->tf_npc;
	md_core.md_tf.tf_y = l->l_md.md_tf->tf_y;
	md_core.md_tf.tf_tt = l->l_md.md_tf->tf_tt;
	md_core.md_tf.tf_pil = l->l_md.md_tf->tf_pil;
	md_core.md_tf.tf_oldpil = l->l_md.md_tf->tf_oldpil;

	for (i = 0; i < 8; i++) {
		md_core.md_tf.tf_global[i] = l->l_md.md_tf->tf_global[i];
		md_core.md_tf.tf_out[i] = l->l_md.md_tf->tf_out[i];
	}
#ifdef DEBUG
	for (i = 0; i < 8; i++) {
		md_core.md_tf.tf_local[i] = l->l_md.md_tf->tf_local[i];
		md_core.md_tf.tf_in[i] = l->l_md.md_tf->tf_in[i];
	}
#endif

	if (l->l_md.md_fpstate) {
		fpusave_lwp(l, true);
		md_core.md_fpstate = *l->l_md.md_fpstate;
	} else
		memset(&md_core.md_fpstate, 0, sizeof(md_core.md_fpstate));

	CORE_SETMAGIC(cseg, CORESEGMAGIC, MID_MACHINE, CORE_CPU);
	cseg.c_addr = 0;
	cseg.c_size = chdr->c_cpusize;

	error = coredump_write(iocookie, UIO_SYSSPACE, &cseg,
	    chdr->c_seghdrsize);
	if (error)
		return error;

	return coredump_write(iocookie, UIO_SYSSPACE, &md_core,
	    sizeof(md_core));
}
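/*
 * Both coredump routines above follow the same two-pass contract: called
 * with iocookie == NULL they only size the header fields, and called
 * again with a real cookie they emit the segment descriptor followed by
 * the MD payload.  A hedged sketch of a caller driving that contract
 * (the driver function itself is hypothetical):
 */
static int
ex_dump_cpu_segment(struct lwp *l, void *cookie, struct core *chdr)
{
	int error;

	/* Pass 1: fill in c_hdrsize/c_seghdrsize/c_cpusize, bump c_nseg. */
	error = cpu_coredump(l, NULL, chdr);
	if (error)
		return error;

	/* Pass 2: write the coreseg header and the md_coredump body. */
	return cpu_coredump(l, cookie, chdr);
}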
int
cpu_setmcontext32(struct lwp *l, const mcontext32_t *mcp, unsigned int flags)
{
	struct trapframe *tf = l->l_md.md_regs;
	const __greg32_t *gr = mcp->__gregs;
	struct proc *p = l->l_proc;
	int error;

	/* Restore register context, if any. */
	if ((flags & _UC_CPU) != 0) {
		/*
		 * Check for security violations.
		 */
		error = cpu_mcontext32_validate(l, mcp);
		if (error != 0)
			return error;

		cpu_fsgs_reload(l, gr[_REG32_FS], gr[_REG32_GS]);
		tf->tf_es = gr[_REG32_ES];
		tf->tf_ds = gr[_REG32_DS];
		/* Only change the user-alterable part of eflags */
		tf->tf_rflags &= ~PSL_USER;
		tf->tf_rflags |= (gr[_REG32_EFL] & PSL_USER);
		tf->tf_rdi = gr[_REG32_EDI];
		tf->tf_rsi = gr[_REG32_ESI];
		tf->tf_rbp = gr[_REG32_EBP];
		tf->tf_rbx = gr[_REG32_EBX];
		tf->tf_rdx = gr[_REG32_EDX];
		tf->tf_rcx = gr[_REG32_ECX];
		tf->tf_rax = gr[_REG32_EAX];
		tf->tf_rip = gr[_REG32_EIP];
		tf->tf_cs = gr[_REG32_CS];
		tf->tf_rsp = gr[_REG32_UESP];
		tf->tf_ss = gr[_REG32_SS];
	}

	if ((flags & _UC_TLSBASE) != 0)
		lwp_setprivate(l, (void *)(uintptr_t)mcp->_mc_tlsbase);

	/* Restore floating point register context, if any. */
	if ((flags & _UC_FPU) != 0) {
		struct pcb *pcb = lwp_getpcb(l);

		/*
		 * If we were using the FPU, forget that we were.
		 */
		if (pcb->pcb_fpcpu != NULL) {
			fpusave_lwp(l, false);
		}
		memcpy(&pcb->pcb_savefpu.fp_fxsave, &mcp->__fpregs,
		    sizeof(pcb->pcb_savefpu.fp_fxsave));
		/* If not set already. */
		l->l_md.md_flags |= MDL_USEDFPU;
	}

	mutex_enter(p->p_lock);
	if (flags & _UC_SETSTACK)
		l->l_sigstk.ss_flags |= SS_ONSTACK;
	if (flags & _UC_CLRSTACK)
		l->l_sigstk.ss_flags &= ~SS_ONSTACK;
	mutex_exit(p->p_lock);

	return (0);
}
void
netbsd32_cpu_getmcontext(
	struct lwp *l,
	/* netbsd32_mcontext_t XXX */ mcontext_t *mcp,
	unsigned int *flags)
{
#if 0 /* XXX */
	greg32_t *gr = mcp->__gregs;
	const struct trapframe64 *tf = l->l_md.md_tf;

	/* First ensure consistent stack state (see sendsig). */ /* XXX? */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(l->l_proc->p_lock);
		sigexit(l, SIGILL);
	}

	/* For now: Erase any random indicators for optional state. */
	(void)memset(mcp, 0, sizeof(*mcp));

	/* Save general register context. */
	gr[_REG_PSR] = TSTATECCR_TO_PSR(tf->tf_tstate);
	gr[_REG_PC] = tf->tf_pc;
	gr[_REG_nPC] = tf->tf_npc;
	gr[_REG_Y] = tf->tf_y;
	gr[_REG_G1] = tf->tf_global[1];
	gr[_REG_G2] = tf->tf_global[2];
	gr[_REG_G3] = tf->tf_global[3];
	gr[_REG_G4] = tf->tf_global[4];
	gr[_REG_G5] = tf->tf_global[5];
	gr[_REG_G6] = tf->tf_global[6];
	gr[_REG_G7] = tf->tf_global[7];
	gr[_REG_O0] = tf->tf_out[0];
	gr[_REG_O1] = tf->tf_out[1];
	gr[_REG_O2] = tf->tf_out[2];
	gr[_REG_O3] = tf->tf_out[3];
	gr[_REG_O4] = tf->tf_out[4];
	gr[_REG_O5] = tf->tf_out[5];
	gr[_REG_O6] = tf->tf_out[6];
	gr[_REG_O7] = tf->tf_out[7];
	*flags |= (_UC_CPU|_UC_TLSBASE);

	mcp->__gwins = 0;

	/* Save FP register context, if any. */
	if (l->l_md.md_fpstate != NULL) {
		struct fpstate *fsp;
		netbsd32_fpregset_t *fpr = &mcp->__fpregs;

		/*
		 * If our FP context is currently held in the FPU, take a
		 * private snapshot - lazy FPU context switching can deal
		 * with it later when it becomes necessary.
		 * Otherwise, get it from the process's save area.
		 */
		fpusave_lwp(l, true);
		fsp = l->l_md.md_fpstate;
		memcpy(&fpr->__fpu_fr, fsp->fs_regs, sizeof(fpr->__fpu_fr));
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_fsr = fsp->fs_fsr;
		mcp->__fpregs.__fpu_qcnt = 0 /*fsp->fs_qsize*/;	/* See above */
		mcp->__fpregs.__fpu_q_entrysize =
		    sizeof(struct netbsd32_fq);
		mcp->__fpregs.__fpu_en = 1;
		*flags |= _UC_FPU;
	} else {
		mcp->__fpregs.__fpu_en = 0;
	}

	mcp->__xrs.__xrs_id = 0;	/* Solaris extension? */
#endif
}
int
netbsd32_cpu_setmcontext(
	struct lwp *l,
	/* XXX const netbsd32_ */ mcontext_t *mcp,
	unsigned int flags)
{
#ifdef NOT_YET /* XXX */
	greg32_t *gr = mcp->__gregs;
	struct trapframe64 *tf = l->l_md.md_tf;

	/* First ensure consistent stack state (see sendsig). */
	write_user_windows();
	if (rwindow_save(l)) {
		mutex_enter(l->l_proc->p_lock);
		sigexit(l, SIGILL);
	}

	if ((flags & _UC_CPU) != 0) {
		/*
		 * Only the icc bits in the psr are used, so it need not be
		 * verified.  pc and npc must be multiples of 4.  This is all
		 * that is required; if it holds, just do it.
		 */
		if (((gr[_REG_PC] | gr[_REG_nPC]) & 3) != 0 ||
		    gr[_REG_PC] == 0 || gr[_REG_nPC] == 0)
			return (EINVAL);

		/* Restore general register context. */
		/* take only tstate CCR (and ASI) fields */
		tf->tf_tstate = (tf->tf_tstate & ~TSTATE_CCR) |
		    PSRCC_TO_TSTATE(gr[_REG_PSR]);
		tf->tf_pc = (uint64_t)gr[_REG_PC];
		tf->tf_npc = (uint64_t)gr[_REG_nPC];
		tf->tf_y = (uint64_t)gr[_REG_Y];
		tf->tf_global[1] = (uint64_t)gr[_REG_G1];
		tf->tf_global[2] = (uint64_t)gr[_REG_G2];
		tf->tf_global[3] = (uint64_t)gr[_REG_G3];
		tf->tf_global[4] = (uint64_t)gr[_REG_G4];
		tf->tf_global[5] = (uint64_t)gr[_REG_G5];
		tf->tf_global[6] = (uint64_t)gr[_REG_G6];
		tf->tf_global[7] = (uint64_t)gr[_REG_G7];
		tf->tf_out[0] = (uint64_t)gr[_REG_O0];
		tf->tf_out[1] = (uint64_t)gr[_REG_O1];
		tf->tf_out[2] = (uint64_t)gr[_REG_O2];
		tf->tf_out[3] = (uint64_t)gr[_REG_O3];
		tf->tf_out[4] = (uint64_t)gr[_REG_O4];
		tf->tf_out[5] = (uint64_t)gr[_REG_O5];
		tf->tf_out[6] = (uint64_t)gr[_REG_O6];
		tf->tf_out[7] = (uint64_t)gr[_REG_O7];
		/* %asi restored above; %fprs not yet supported. */

		/* XXX mcp->__gwins */
	}

	/* Restore FP register context, if any. */
	if ((flags & _UC_FPU) != 0 && mcp->__fpregs.__fpu_en != 0) {
		struct fpstate *fsp;
		const netbsd32_fpregset_t *fpr = &mcp->__fpregs;
		int reload = 0;

		/*
		 * If we're the current FPU owner, simply reload it from
		 * the supplied context.  Otherwise, store it into the
		 * process' FPU save area (which is used to restore from
		 * by lazy FPU context switching); allocate it if necessary.
		 */
		/*
		 * XXX Should we really activate the supplied FPU context
		 * XXX immediately or just fault it in later?
		 */
		if ((fsp = l->l_md.md_fpstate) == NULL) {
			fsp = pool_cache_get(fpstate_cache, PR_WAITOK);
			l->l_md.md_fpstate = fsp;
		} else {
			/* Drop the live context on the floor. */
			fpusave_lwp(l, false);
			reload = 1;
		}
		/* Note: sizeof fpr->__fpu_fr <= sizeof fsp->fs_regs. */
		memcpy(fsp->fs_regs, fpr->__fpu_fr, sizeof(fpr->__fpu_fr));
		fsp->fs_fsr = fpr->__fpu_fsr;	/* don't care about fcc1-3 */
		fsp->fs_qsize = 0;
#if 0	/* Need more info! */
		mcp->__fpregs.__fpu_q = NULL;	/* `Need more info.' */
		mcp->__fpregs.__fpu_qcnt = 0 /*fsp->fs_qsize*/;	/* See above */
#endif

		/* Reload context again, if necessary. */
		if (reload)
			loadfpstate(fsp);
	}

	/* XXX mcp->__xrs */
	/* XXX mcp->__asrs */
#endif
	return (0);
}
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *opcb = lwp_getpcb(l1);
	struct pcb *npcb = lwp_getpcb(l2);
	struct trapframe *tf2;
	struct rwindow *rp;

	/*
	 * Save all user registers to l1's stack or, in the case of
	 * user registers and invalid stack pointers, to opcb.
	 * We then copy the whole pcb to l2; when switch() selects l2
	 * to run, it will run at the `lwp_trampoline' stub, rather
	 * than returning at the copying code below.
	 *
	 * If process l1 has an FPU state, we must copy it.  If it is
	 * the FPU user, we must save the FPU state first.
	 */

#ifdef NOTDEF_DEBUG
	printf("cpu_lwp_fork()\n");
#endif
	if (l1 == curlwp) {
		write_user_windows();

		/*
		 * We're in the kernel, so we don't really care about
		 * %ccr or %asi.  We do want to duplicate %pstate and %cwp.
		 */
		opcb->pcb_pstate = getpstate();
		opcb->pcb_cwp = getcwp();
	}
#ifdef DIAGNOSTIC
	else if (l1 != &lwp0)
		panic("cpu_lwp_fork: curlwp");
#endif
#ifdef DEBUG
	/* prevent us from having NULL lastcall */
	opcb->lastcall = cpu_forkname;
#else
	opcb->lastcall = NULL;
#endif
	memcpy(npcb, opcb, sizeof(struct pcb));
	if (l1->l_md.md_fpstate) {
		fpusave_lwp(l1, true);
		l2->l_md.md_fpstate = pool_cache_get(fpstate_cache,
		    PR_WAITOK);
		memcpy(l2->l_md.md_fpstate, l1->l_md.md_fpstate,
		    sizeof(struct fpstate64));
	} else
		l2->l_md.md_fpstate = NULL;

	/*
	 * Setup (kernel) stack frame that will by-pass the child
	 * out of the kernel.  (The trap frame invariably resides at
	 * the tippity-top of the u. area.)
	 */
	tf2 = l2->l_md.md_tf = (struct trapframe *)
	    ((long)npcb + USPACE - sizeof(*tf2));

	/* Copy parent's trapframe */
	*tf2 = *(struct trapframe *)((long)opcb + USPACE - sizeof(*tf2));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf2->tf_out[6] = (uint64_t)(u_long)stack + stacksize;

	/*
	 * Set return values in child mode and clear condition code,
	 * in case we end up running a signal handler before returning
	 * to userland.
	 */
	tf2->tf_out[0] = 0;
	tf2->tf_out[1] = 1;
	tf2->tf_tstate &= ~TSTATE_CCR;

	/* Construct kernel frame to return to in cpu_switch() */
	rp = (struct rwindow *)((u_long)npcb + TOPFRAMEOFF);
	*rp = *(struct rwindow *)((u_long)opcb + TOPFRAMEOFF);
	rp->rw_local[0] = (long)func;	/* Function to call */
	rp->rw_local[1] = (long)arg;	/* and its argument */
	rp->rw_local[2] = (long)l2;	/* new lwp */

	npcb->pcb_pc = (long)lwp_trampoline - 8;
	npcb->pcb_sp = (long)rp - STACK_OFFSET;
}