/*
 * cpu_getmcontext32:
 *	Capture the 32-bit machine context of lwp "l" into "mcp".
 *	General registers _X_RA.._X_GP, the PC and the TLS base are
 *	saved unconditionally; the FP register file is copied from the
 *	PCB only when the lwp currently has valid FPU state.
 *	*flags gets _UC_CPU|_UC_TLSBASE or'ed in, plus _UC_FPU when FP
 *	state was saved.  Must be called on curlwp (asserted below).
 */
void
cpu_getmcontext32(struct lwp *l, mcontext32_t *mcp, unsigned int *flags)
{
	const struct trapframe * const tf = l->l_md.md_utf;

	/* Save register context. */
	for (size_t i = _X_RA; i <= _X_GP; i++) {
		mcp->__gregs[i] = tf->tf_reg[i];
	}
	mcp->__gregs[_REG_PC] = tf->tf_pc;
	/* TLS pointer is kept in l_private, not in the trapframe. */
	mcp->__private = (intptr_t)l->l_private;

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any. */
	KASSERT(l == curlwp);
	if (fpu_valid_p()) {
		/*
		 * If this process is the current FP owner, dump its
		 * context to the PCB first.
		 */
		fpu_save();

		struct pcb * const pcb = lwp_getpcb(l);
		*(struct fpreg *)mcp->__fregs = pcb->pcb_fpregs;
		*flags |= _UC_FPU;
	}
}
/*
 * Save a thread's extended (FPU) state and mark no thread active.
 * The fpu_save() call is skipped when the thread has no fxdata save
 * area (or no thread is given).  Always returns 0.
 */
int
thread_save(struct thread *thread)
{
	if (thread != NULL && thread->fxdata != NULL)
		fpu_save(thread->fxdata);

	_active_thread = NULL;
	return 0;
}
/*
 * save the FPU state to a signal context
 *
 * Returns 0 when the task is not using the FPU (nothing to save),
 * -1 when the copy to userspace faults, and 1 on success.
 */
int fpu_setup_sigcontext(struct fpucontext *fpucontext)
{
	struct task_struct *tsk = current;

	if (!is_using_fpu(tsk))
		return 0;

	/* transfer the current FPU state to memory and cause fpu_init() to be
	 * triggered by the next attempted FPU operation by the current
	 * process.
	 */
	preempt_disable();

#ifndef CONFIG_LAZY_SAVE_FPU
	/* eager mode: state is live iff this thread holds the FPU flag */
	if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
		fpu_save(&tsk->thread.fpu_state);
		/* clearing EPSW_FE makes the next FP insn trap to fpu_init() */
		tsk->thread.uregs->epsw &= ~EPSW_FE;
		tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
	}
#else  /* !CONFIG_LAZY_SAVE_FPU */
	/* lazy mode: state is live iff this task is the global FPU owner */
	if (fpu_state_owner == tsk) {
		fpu_save(&tsk->thread.fpu_state);
		fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
		fpu_state_owner = NULL;
	}
#endif /* !CONFIG_LAZY_SAVE_FPU */

	preempt_enable();

	/* we no longer have a valid current FPU state */
	clear_using_fpu(tsk);

	/* transfer the saved FPU state onto the userspace stack */
	if (copy_to_user(fpucontext,
			 &tsk->thread.fpu_state,
			 min(sizeof(struct fpu_state_struct),
			     sizeof(struct fpucontext))))
		return -1;

	return 1;
}
/*
 * cpu_getmcontext:
 *	Capture the native machine context of lwp "l" into "mcp":
 *	general registers (R0 is skipped, it is architecturally zero),
 *	MDLO/MDHI, CAUSE, EPC, SR and the TLS base.  If the saved PC
 *	lies inside a registered restartable atomic sequence, it is
 *	rewound to the sequence start.  FP state is copied from the
 *	PCB when the lwp has used the FPU; the copy length depends on
 *	the process ABI (new ABI vs. o32).  Must be called on curlwp.
 */
void
cpu_getmcontext(struct lwp *l, mcontext_t *mcp, unsigned int *flags)
{
	const struct trapframe *tf = l->l_md.md_utf;
	__greg_t *gr = mcp->__gregs;
	__greg_t ras_pc;

	/* Save register context. Dont copy R0 - it is always 0 */
	memcpy(&gr[_REG_AT], &tf->tf_regs[_R_AST],
	    sizeof(mips_reg_t) * 31);

	gr[_REG_MDLO]  = tf->tf_regs[_R_MULLO];
	gr[_REG_MDHI]  = tf->tf_regs[_R_MULHI];
	gr[_REG_CAUSE] = tf->tf_regs[_R_CAUSE];
	gr[_REG_EPC]   = tf->tf_regs[_R_PC];
	gr[_REG_SR]    = tf->tf_regs[_R_SR];
	mcp->_mc_tlsbase = (intptr_t)l->l_private;

	/* Rewind the PC if it is inside a restartable atomic sequence. */
	if ((ras_pc = (intptr_t)ras_lookup(l->l_proc,
	    (void *) (intptr_t)gr[_REG_EPC])) != -1)
		gr[_REG_EPC] = ras_pc;

	*flags |= _UC_CPU | _UC_TLSBASE;

	/* Save floating point register context, if any. */
	KASSERT(l == curlwp);
	if (fpu_used_p()) {
		size_t fplen;
		/*
		 * If this process is the current FP owner, dump its
		 * context to the PCB first.
		 */
		fpu_save();

		/*
		 * The PCB FP regs struct includes the FP CSR, so use the
		 * size of __fpregs.__fp_r when copying.
		 *
		 * NB: the braces of the if/else below are deliberately
		 * split across the #if so an o32-only kernel compiles to
		 * the unconditional assignment.
		 */
#if !defined(__mips_o32)
		if (_MIPS_SIM_NEWABI_P(l->l_proc->p_md.md_abi)) {
#endif
			fplen = sizeof(struct fpreg);
#if !defined(__mips_o32)
		} else {
			fplen = sizeof(struct fpreg_oabi);
		}
#endif
		struct pcb * const pcb = lwp_getpcb(l);
		memcpy(&mcp->__fpregs, &pcb->pcb_fpregs, fplen);
		*flags |= _UC_FPU;
	}
}
/*
 * Copy lwp "l"'s floating point register state out of its PCB into
 * *fpregs.  For the current lwp, any live FPU contents are flushed to
 * the PCB first; if the lwp has no valid FPU state, *fpregs is zeroed
 * instead.  A non-current lwp must not have its FPU live on any CPU
 * (asserted).  Always returns 0.
 */
int
process_read_fpregs(struct lwp *l, struct fpreg *fpregs, size_t *sz)
{
	struct pcb * const pcb = lwp_getpcb(l);

	if (l != curlwp) {
		KASSERTMSG(l->l_pcu_cpu[PCU_FPU] == NULL,
		    "%s: FPU of l (%p) active on %s",
		    __func__, l, l->l_pcu_cpu[PCU_FPU]->ci_cpuname);
	} else {
		/* Is the process using the fpu? */
		if (!fpu_valid_p()) {
			memset(fpregs, 0, sizeof(*fpregs));
			return 0;
		}
		/* Flush live FPU contents into the PCB before copying. */
		fpu_save();
	}

	*fpregs = pcb->pcb_fpregs;
	return 0;
}
/*
 * Routine:	cpu_sleep
 * Function:	Prepare this processor for sleep: mark it not running,
 *		save and disown any live FPU/vector state, and (on the
 *		master cpu) patch the reset handler so the cpu restarts
 *		via _start_cpu with its PerProcTable entry.
 *
 * NOTE(review): this function is truncated in the visible chunk; the
 * remainder of its body (and the wait/sleep logic implied by the
 * wait_ncpus_sleep/ncpus_sleep locals) is not shown here.
 */
void
cpu_sleep(
	void)
{
	struct per_proc_info	*proc_info;
	unsigned int		i;
	unsigned int		wait_ncpus_sleep, ncpus_sleep;
	facility_context	*fowner;

	proc_info = getPerProc();

	proc_info->running = FALSE;

	fowner = proc_info->FPU_owner;		/* Cache this */
	if(fowner)				/* If anyone owns FPU, save it */
		fpu_save(fowner);
	proc_info->FPU_owner = NULL;		/* Set no fpu owner now */

	fowner = proc_info->VMX_owner;		/* Cache this */
	if(fowner)
		vec_save(fowner);		/* If anyone owns vectors, save it */
	proc_info->VMX_owner = NULL;		/* Set no vector owner now */

	if (proc_info->cpu_number == master_cpu) {
		proc_info->cpu_flags &= BootDone;
		proc_info->interrupts_enabled = 0;
		proc_info->pending_ast = AST_NONE;

		if (proc_info->start_paddr == EXCEPTION_VECTOR(T_RESET)) {
			/* Point the reset vector at the cpu restart path. */
			ml_phys_write((vm_offset_t)&ResetHandler + 0,
				      RESET_HANDLER_START);
			ml_phys_write((vm_offset_t)&ResetHandler + 4,
				      (vm_offset_t)_start_cpu);
			ml_phys_write((vm_offset_t)&ResetHandler + 8,
				      (vm_offset_t)&PerProcTable[master_cpu]);

			__asm__ volatile("sync");
			__asm__ volatile("isync");
		}
/*
 * Send a signal to process.
 *
 * Builds an old-style (sigcontext) signal frame on the user stack:
 * captures PC, MULLO/MULHI, the general registers, FP state (when the
 * lwp has used the FPU), the signal-stack flag and the signal mask
 * into a local sigcontext, copies it out below the current frame, and
 * rewrites the trapframe so the process enters "catcher" with the
 * signal number, trap code and sigcontext pointer as arguments, and
 * returns through the version-appropriate signal trampoline.  The
 * process is killed with SIGILL on copyout failure, unknown
 * trampoline version, or (on non-o32 kernels) a non-o32 ABI.
 */
void
sendsig_sigcontext(const ksiginfo_t *ksi, const sigset_t *returnmask)
{
	int sig = ksi->ksi_signo;
	struct lwp * const l = curlwp;
	struct proc * const p = l->l_proc;
	struct sigacts * const ps = p->p_sigacts;
	struct pcb * const pcb = lwp_getpcb(l);
	int onstack, error;
	struct sigcontext *scp = getframe(l, sig, &onstack);
	struct sigcontext ksc;
	struct trapframe * const tf = l->l_md.md_utf;
	sig_t catcher = SIGACTION(p, sig).sa_handler;

#if !defined(__mips_o32)
	/* This old-style frame layout only supports the o32 ABI. */
	if (p->p_md.md_abi != _MIPS_BSD_API_O32)
		sigexit(l, SIGILL);
#endif

	/* Reserve room for the sigcontext below the frame pointer. */
	scp--;

#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sendsig(%d): sig %d ssp %p scp %p\n",
		    p->p_pid, sig, &onstack, scp);
#endif

	/* Build stack frame for signal trampoline. */
	ksc.sc_pc = tf->tf_regs[_R_PC];
	ksc.mullo = tf->tf_regs[_R_MULLO];
	ksc.mulhi = tf->tf_regs[_R_MULHI];

	/* Save register context. */
	ksc.sc_regs[_R_ZERO] = 0xACEDBADE;	/* magic number */
#if defined(__mips_o32)
	memcpy(&ksc.sc_regs[1], &tf->tf_regs[1],
	    sizeof(ksc.sc_regs) - sizeof(ksc.sc_regs[0]));
#else
	/* Copy element-wise: sc_regs and tf_regs differ in width here. */
	for (size_t i = 1; i < 32; i++)
		ksc.sc_regs[i] = tf->tf_regs[i];
#endif

	/* Save the FP state, if necessary, then copy it. */
	ksc.sc_fpused = fpu_used_p();
#if !defined(NOFPU)
	if (ksc.sc_fpused) {
		/* if FPU has current state, save it first */
		fpu_save();
	}
#endif
	*(struct fpreg *)ksc.sc_fpregs = *(struct fpreg *)&pcb->pcb_fpregs;

	/* Save signal stack. */
	ksc.sc_onstack = l->l_sigstk.ss_flags & SS_ONSTACK;

	/* Save signal mask. */
	ksc.sc_mask = *returnmask;

#if defined(COMPAT_13) || defined(COMPAT_ULTRIX)
	/*
	 * XXX We always have to save an old style signal mask because
	 * XXX we might be delivering a signal to a process which will
	 * XXX escape from the signal in a non-standard way and invoke
	 * XXX sigreturn() directly.
	 */
	native_sigset_to_sigset13(returnmask, &ksc.__sc_mask13);
#endif
	sendsig_reset(l, sig);

	/* Drop p_lock around the (possibly faulting) copyout. */
	mutex_exit(p->p_lock);
	error = copyout(&ksc, (void *)scp, sizeof(ksc));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		if ((sigdebug & SDB_FOLLOW) ||
		    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
			printf("sendsig(%d): copyout failed on sig %d\n",
			    p->p_pid, sig);
#endif
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Set up the registers to directly invoke the signal
	 * handler.  The return address will be set up to point
	 * to the signal trampoline to bounce us back.
	 */
	tf->tf_regs[_R_A0] = sig;
	tf->tf_regs[_R_A1] = ksi->ksi_trap;
	tf->tf_regs[_R_A2] = (intptr_t)scp;
	tf->tf_regs[_R_A3] = (intptr_t)catcher;		/* XXX ??? */

	tf->tf_regs[_R_PC] = (intptr_t)catcher;
	tf->tf_regs[_R_T9] = (intptr_t)catcher;
	tf->tf_regs[_R_SP] = (intptr_t)scp;

	switch (ps->sa_sigdesc[sig].sd_vers) {
	case 0:		/* legacy on-stack sigtramp */
		tf->tf_regs[_R_RA] = (intptr_t)p->p_sigctx.ps_sigcode;
		break;
#ifdef COMPAT_16
	case 1:
		tf->tf_regs[_R_RA] = (intptr_t)ps->sa_sigdesc[sig].sd_tramp;
		break;
#endif
	default:
		/* Don't know what trampoline version; kill it. */
		sigexit(l, SIGILL);
	}

	/* Remember that we're now on the signal stack. */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG
	if ((sigdebug & SDB_FOLLOW) ||
	    ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid))
		printf("sendsig(%d): sig %d returns\n", p->p_pid, sig);
#endif
}
/*
 * SIGTRAP handler for the x86 Lisp runtime.
 *
 * Two responsibilities:
 *  1. When single-stepping is in progress, undo the single-step setup
 *     (restore the saved helper bytes on BSD; toggle the TF bit in
 *     eflags on Linux), re-install the INT3 breakpoint if the PC is
 *     past it, and return.
 *  2. Otherwise, read the trap-kind byte that follows the INT3
 *     instruction and dispatch: pending interrupt, halt, error,
 *     breakpoint, function-end breakpoint, dynamic-space overflow,
 *     or the generic signal path.
 */
void
sigtrap_handler(HANDLER_ARGS)
{
	unsigned int trap;

#ifdef __linux__
	GET_CONTEXT
#endif
#if 0
	fprintf(stderr, "x86sigtrap: %8x %x\n",
		context->sc_pc, *(unsigned char *) (context->sc_pc - 1));
	fprintf(stderr, "sigtrap(%d %d %x)\n", signal, code, context);
#endif

	if (single_stepping && (signal == SIGTRAP)) {
#if 0
		fprintf(stderr, "* Single step trap %x\n", single_stepping);
#endif

#ifndef __linux__
		/* Un-install single step helper instructions. */
		*(single_stepping - 3) = single_step_save1;
		*(single_stepping - 2) = single_step_save2;
		*(single_stepping - 1) = single_step_save3;
#else
		/* Toggle the x86 trap flag (TF, bit 8) off again. */
		context->eflags ^= 0x100;
#endif

		/*
		 * Re-install the breakpoint if possible.
		 */
		if ((int) context->sc_pc == (int) single_stepping + 1)
			fprintf(stderr, "* Breakpoint not re-install\n");
		else {
			char *ptr = (char *) single_stepping;

			ptr[0] = BREAKPOINT_INST;	/* x86 INT3 */
			ptr[1] = trap_Breakpoint;
		}

		single_stepping = NULL;
		return;
	}

	/* This is just for info in case monitor wants to print an approx */
	current_control_stack_pointer = (unsigned long *) context->sc_sp;

#if defined(__linux__) && (defined(i386) || defined(__x86_64))
	/*
	 * Restore the FPU control word, setting the rounding mode to nearest.
	 */
	if (contextstruct.fpstate)
#if defined(__x86_64)
		setfpucw(contextstruct.fpstate->cwd & ~0xc00);
#else
		setfpucw(contextstruct.fpstate->cw & ~0xc00);
#endif
#endif

	/*
	 * On entry %eip points just after the INT3 byte and aims at the
	 * 'kind' value (eg trap_Cerror). For error-trap and Cerror-trap a
	 * number of bytes will follow, the first is the length of the byte
	 * arguments to follow.
	 */
	trap = *(unsigned char *) (context->sc_pc);

	switch (trap) {
	  case trap_PendingInterrupt:
		  DPRINTF(0, (stderr, "<trap Pending Interrupt.>\n"));
		  arch_skip_instruction(context);
		  interrupt_handle_pending(context);
		  break;

	  case trap_Halt:
	  {
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
		  /* Preserve FP state around the foreign-call dance. */
		  int fpu_state[27];

		  fpu_save(fpu_state);
#endif
		  fake_foreign_function_call(context);
		  lose("%%primitive halt called; the party is over.\n");
		  undo_fake_foreign_function_call(context);
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
		  fpu_restore(fpu_state);
#endif
		  arch_skip_instruction(context);
		  break;
	  }

	  case trap_Error:
	  case trap_Cerror:
		  DPRINTF(0, (stderr, "<trap Error %d>\n", code));
#ifdef __linux__
		  interrupt_internal_error(signal, contextstruct,
					   code == trap_Cerror);
#else
		  interrupt_internal_error(signal, code, context,
					   code == trap_Cerror);
#endif
		  break;

	  case trap_Breakpoint:
#if 0
		  fprintf(stderr, "*C break\n");
#endif
		  /*
		   * NOTE(review): "(char *) lvalue -= 1" is a legacy GCC
		   * cast-as-lvalue extension (rejected by modern
		   * compilers); it backs the PC up over the INT3 byte.
		   */
		  (char *) context->sc_pc -= 1;
		  handle_breakpoint(signal, code, context);
#if 0
		  fprintf(stderr, "*C break return\n");
#endif
		  break;

	  case trap_FunctionEndBreakpoint:
		  (char *) context->sc_pc -= 1;
		  context->sc_pc =
		      (int) handle_function_end_breakpoint(signal, code,
							   context);
		  break;

#ifdef DYNAMIC_SPACE_OVERFLOW_WARNING_HIT
	  case trap_DynamicSpaceOverflowWarning:
		  interrupt_handle_space_overflow(SymbolFunction
						  (DYNAMIC_SPACE_OVERFLOW_WARNING_HIT),
						  context);
		  break;
#endif
#ifdef DYNAMIC_SPACE_OVERFLOW_ERROR_HIT
	  case trap_DynamicSpaceOverflowError:
		  interrupt_handle_space_overflow(SymbolFunction
						  (DYNAMIC_SPACE_OVERFLOW_ERROR_HIT),
						  context);
		  break;
#endif
	  default:
		  DPRINTF(0, (stderr, "[C--trap default %d %d %x]\n",
			      signal, code, context));
#ifdef __linux__
		  interrupt_handle_now(signal, contextstruct);
#else
		  interrupt_handle_now(signal, code, context);
#endif
		  break;
	}
}
/*
 * Send an interrupt to process.
 *
 * Builds a sigframe (optionally with siginfo) on the signal stack or
 * the current user stack, captures the full SH register context (and,
 * on SH4, the FP registers) plus the signal mask into it, copies it
 * out, and rewrites the trapframe so the process enters "catcher"
 * with (signum, sip, ucp) in r4-r6 and returns through p_sigcode.
 * The process is killed with SIGILL if the copyout fails.
 */
void
sendsig(sig_t catcher, int sig, int mask, u_long code, int type,
    union sigval val)
{
	struct proc *p = curproc;
	struct sigframe *fp, frame;
	struct trapframe *tf = p->p_md.md_regs;
	struct sigacts *psp = p->p_sigacts;
	siginfo_t *sip;
	int onstack;

	/* Decide whether to build the frame on the alternate stack. */
	onstack = p->p_sigstk.ss_flags & SS_ONSTACK;

	if ((p->p_sigstk.ss_flags & SS_DISABLE) == 0 && onstack == 0 &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)((vaddr_t)p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size);
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	} else
		fp = (void *)p->p_md.md_regs->tf_r15;	/* user SP */
	--fp;

	bzero(&frame, sizeof(frame));

	/* Fill in siginfo only if the handler asked for it. */
	if (psp->ps_siginfo & sigmask(sig)) {
		initsiginfo(&frame.sf_si, sig, code, type, val);
		sip = &fp->sf_si;
	} else
		sip = NULL;

	/* Save register context. */
	frame.sf_uc.sc_reg.r_spc = tf->tf_spc;
	frame.sf_uc.sc_reg.r_ssr = tf->tf_ssr;
	frame.sf_uc.sc_reg.r_pr = tf->tf_pr;
	frame.sf_uc.sc_reg.r_mach = tf->tf_mach;
	frame.sf_uc.sc_reg.r_macl = tf->tf_macl;
	frame.sf_uc.sc_reg.r_r15 = tf->tf_r15;
	frame.sf_uc.sc_reg.r_r14 = tf->tf_r14;
	frame.sf_uc.sc_reg.r_r13 = tf->tf_r13;
	frame.sf_uc.sc_reg.r_r12 = tf->tf_r12;
	frame.sf_uc.sc_reg.r_r11 = tf->tf_r11;
	frame.sf_uc.sc_reg.r_r10 = tf->tf_r10;
	frame.sf_uc.sc_reg.r_r9 = tf->tf_r9;
	frame.sf_uc.sc_reg.r_r8 = tf->tf_r8;
	frame.sf_uc.sc_reg.r_r7 = tf->tf_r7;
	frame.sf_uc.sc_reg.r_r6 = tf->tf_r6;
	frame.sf_uc.sc_reg.r_r5 = tf->tf_r5;
	frame.sf_uc.sc_reg.r_r4 = tf->tf_r4;
	frame.sf_uc.sc_reg.r_r3 = tf->tf_r3;
	frame.sf_uc.sc_reg.r_r2 = tf->tf_r2;
	frame.sf_uc.sc_reg.r_r1 = tf->tf_r1;
	frame.sf_uc.sc_reg.r_r0 = tf->tf_r0;

#ifdef SH4
	/* FP registers are not in the trapframe; dump them directly. */
	if (CPU_IS_SH4)
		fpu_save(&frame.sf_uc.sc_fpreg);
#endif

	frame.sf_uc.sc_onstack = onstack;
	frame.sf_uc.sc_expevt = tf->tf_expevt;
	/* frame.sf_uc.sc_err = 0; */
	frame.sf_uc.sc_mask = mask;

	if (copyout(&frame, fp, sizeof(frame)) != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	tf->tf_r4 = sig;		/* "signum" argument for handler */
	tf->tf_r5 = (int)sip;		/* "sip" argument for handler */
	tf->tf_r6 = (int)&fp->sf_uc;	/* "ucp" argument for handler */
	tf->tf_spc = (int)catcher;
	tf->tf_r15 = (int)fp;
	tf->tf_pr = (int)p->p_sigcode;	/* return via the sigcode tramp */
}
/*
 * linux_sendsig:
 *	Deliver a signal to a Linux binary on PowerPC, mimicking the
 *	frame layout of arch/ppc/kernel/signal.c:setup_frame().  A
 *	linux_sigregs frame (GPRs, srr0/srr1, FP registers, and a
 *	trampoline that Linux binaries may expect to find) is copied
 *	onto the user stack, followed by a linux_sigcontext.  The
 *	trapframe is then rewritten so the process enters "catcher"
 *	with the Linux signal number in r3 and the sigcontext pointer
 *	in r4.  Copyout failure kills the process with SIGILL.
 */
void
linux_sendsig(const ksiginfo_t *ksi, const sigset_t *mask)
{
	const int sig = ksi->ksi_signo;
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct trapframe *tf;
	sig_t catcher = SIGACTION(p, sig).sa_handler;
	struct linux_sigregs frame;
	struct linux_pt_regs linux_regs;
	struct linux_sigcontext sc;
	register_t fp;
	int onstack, error;
	int i;

	tf = trapframe(l);

	/*
	 * Do we need to jump onto the signal stack?
	 */
	onstack =
	    (l->l_sigstk.ss_flags & (SS_DISABLE | SS_ONSTACK)) == 0 &&
	    (SIGACTION(p, sig).sa_flags & SA_ONSTACK) != 0;

	/*
	 * Signal stack is broken (see at the end of linux_sigreturn), so we do
	 * not use it yet. XXX fix this.
	 */
	onstack = 0;

	/*
	 * Allocate space for the signal handler context.
	 */
	if (onstack) {
		fp = (register_t)
		    ((char *)l->l_sigstk.ss_sp + l->l_sigstk.ss_size);
	} else {
		fp = tf->tf_fixreg[1];	/* r1 is the user stack pointer */
	}
#ifdef DEBUG_LINUX
	printf("fp at start of linux_sendsig = %x\n", fp);
#endif
	fp -= sizeof(struct linux_sigregs);
	fp &= ~0xf;	/* keep the frame 16-byte aligned */

	/*
	 * Prepare a sigcontext for later.
	 */
	memset(&sc, 0, sizeof sc);
	sc.lsignal = (int)native_to_linux_signo[sig];
	sc.lhandler = (unsigned long)catcher;
	native_to_linux_old_extra_sigset(&sc.lmask, &sc._unused[3], mask);
	sc.lregs = (struct linux_pt_regs*)fp;

	/*
	 * Setup the signal stack frame as Linux does it in
	 * arch/ppc/kernel/signal.c:setup_frame()
	 *
	 * Save register context.
	 */
	for (i = 0; i < 32; i++)
		linux_regs.lgpr[i] = tf->tf_fixreg[i];
	linux_regs.lnip = tf->tf_srr0;
	linux_regs.lmsr = tf->tf_srr1 & PSL_USERSRR1;
	linux_regs.lorig_gpr3 = tf->tf_fixreg[3];	/* XXX Is that right? */
	linux_regs.lctr = tf->tf_ctr;
	linux_regs.llink = tf->tf_lr;
	linux_regs.lxer = tf->tf_xer;
	linux_regs.lccr = tf->tf_cr;
	linux_regs.lmq = 0;				/* Unused, 601 only */
	linux_regs.ltrap = tf->tf_exc;
	linux_regs.ldar = tf->tf_dar;
	linux_regs.ldsisr = tf->tf_dsisr;
	linux_regs.lresult = 0;

	memset(&frame, 0, sizeof(frame));
	memcpy(&frame.lgp_regs, &linux_regs, sizeof(linux_regs));

#ifdef PPC_HAVE_FPU
	/* Flush live FP registers into the PCB before copying them. */
	fpu_save();
#endif
	memcpy(&frame.lfp_regs, curpcb->pcb_fpu.fpreg,
	    sizeof(frame.lfp_regs));

	/*
	 * Copy Linux's signal trampoline on the user stack It should not
	 * be used, but Linux binaries might expect it to be there.
	 */
	frame.ltramp[0] = 0x38997777;	/* li r0, 0x7777 */
	frame.ltramp[1] = 0x44000002;	/* sc */

	/*
	 * Move it to the user stack
	 * There is a little trick here, about the LINUX_ABIGAP: the
	 * linux_sigreg structure has a 56 int gap to support rs6000/xcoff
	 * binaries. But the Linux kernel seems to do without it, and it
	 * just skip it when building the stack frame. Hence the LINUX_ABIGAP.
	 */
	sendsig_reset(l, sig);
	mutex_exit(p->p_lock);
	error = copyout(&frame, (void *)fp, sizeof (frame) - LINUX_ABIGAP);
	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		mutex_enter(p->p_lock);
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Add a sigcontext on the stack
	 */
	fp -= sizeof(struct linux_sigcontext);
	error = copyout(&sc, (void *)fp, sizeof (struct linux_sigcontext));
	mutex_enter(p->p_lock);

	if (error != 0) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		sigexit(l, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Set the registers according to how the Linux process expects them.
	 * "Mind the gap" Linux expects a gap here.
	 */
	tf->tf_fixreg[1] = fp - LINUX__SIGNAL_FRAMESIZE;
	tf->tf_lr = (int)catcher;
	tf->tf_fixreg[3] = (int)native_to_linux_signo[sig];
	tf->tf_fixreg[4] = fp;
	tf->tf_srr0 = (int)p->p_sigctx.ps_sigcode;

#ifdef DEBUG_LINUX
	printf("fp at end of linux_sendsig = %x\n", fp);
#endif

	/*
	 * Remember that we're now on the signal stack.
	 */
	if (onstack)
		l->l_sigstk.ss_flags |= SS_ONSTACK;

#ifdef DEBUG_LINUX
	printf("linux_sendsig: exiting. fp=0x%lx\n",(long)fp);
#endif
}
/*
 * Return a pointer to the current activation's topmost FPU savearea,
 * first flushing any live FP register state into it so the contents
 * are up to date.
 */
static savearea_fpu *chudxnu_private_get_fp_regs(void)
{
	fpu_save(current_act()->mact.curctx);		// just in case it's live, save it
	return current_act()->mact.curctx->FPUsave;	// take the top savearea (user or kernel)
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb and trap frame, making the child ready to run.
 *
 * Rig the child's kernel stack so that it will start out in
 * proc_trampoline() and call child_return() with p2 as an
 * argument. This causes the newly-created child process to go
 * directly to user level with an apparent return value of 0 from
 * fork(), while the parent process returns normally.
 *
 * p1 is the process being forked; if p1 == &proc0, we are creating
 * a kernel thread, and the return path and argument are specified with
 * `func' and `arg'.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize args), set up the user stack pointer
 * accordingly.
 */
void
cpu_fork(struct proc *p1, struct proc *p2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb *pcb;
	struct trapframe *tf;
	struct switchframe *sf;
	vaddr_t spbase, fptop;
	/* Translate a kernel VA to its P1 (cached, unmapped) alias. */
#define	P1ADDR(x)	(SH3_PHYS_TO_P1SEG(*__pmap_kpte_lookup(x) & PG_PPN))

	KDASSERT(p1 == curproc || p1 == &proc0);

	bzero(&p2->p_md, sizeof(p2->p_md));

	/* Copy flags */
	p2->p_md.md_flags = p1->p_md.md_flags;

	pcb = NULL;		/* XXXGCC: -Wuninitialized */
#ifdef SH3
	/*
	 * Convert frame pointer top to P1. because SH3 can't make
	 * wired TLB entry, context store space accessing must not cause
	 * exception. For SH3, we are 4K page, P3/P1 conversion don't
	 * cause virtual-aliasing.
	 */
	if (CPU_IS_SH3)
		pcb = (struct pcb *)P1ADDR((vaddr_t)&p2->p_addr->u_pcb);
#endif /* SH3 */
#ifdef SH4
	/* SH4 can make wired entry, no need to convert to P1. */
	if (CPU_IS_SH4)
		pcb = &p2->p_addr->u_pcb;
#endif /* SH4 */

	p2->p_md.md_pcb = pcb;
	fptop = (vaddr_t)pcb + PAGE_SIZE;

	/* set up the kernel stack pointer */
	spbase = (vaddr_t)p2->p_addr + PAGE_SIZE;
#ifdef P1_STACK
	/* Convert to P1 from P3 */
	/*
	 * wbinv u-area to avoid cache-aliasing, since kernel stack
	 * is accessed from P1 instead of P3.
	 */
	if (SH_HAS_VIRTUAL_ALIAS)
		sh_dcache_wbinv_range((vaddr_t)p2->p_addr, USPACE);
	spbase = P1ADDR(spbase);
#else /* !P1_STACK */
	/* Prepare u-area PTEs */
#ifdef SH3
	if (CPU_IS_SH3)
		sh3_switch_setup(p2);
#endif
#ifdef SH4
	if (CPU_IS_SH4)
		sh4_switch_setup(p2);
#endif
#endif /* !P1_STACK */

#ifdef KSTACK_DEBUG
	/* Fill magic number for tracking */
	memset((char *)fptop - PAGE_SIZE + sizeof(struct user), 0x5a,
	    PAGE_SIZE - sizeof(struct user));
	memset((char *)spbase, 0xa5, (USPACE - PAGE_SIZE));
	memset(&pcb->pcb_sf, 0xb4, sizeof(struct switchframe));
#endif /* KSTACK_DEBUG */

	/*
	 * Copy the user context.
	 */
	p2->p_md.md_regs = tf = (struct trapframe *)fptop - 1;
	memcpy(tf, p1->p_md.md_regs, sizeof(struct trapframe));

	/*
	 * If specified, give the child a different stack.
	 */
	if (stack != NULL)
		tf->tf_r15 = (u_int)stack + stacksize;

	/* Setup switch frame */
	sf = &pcb->pcb_sf;
	sf->sf_r11 = (int)arg;		/* proc_trampoline hook func's arg */
	sf->sf_r12 = (int)func;		/* proc_trampoline hook func */
	sf->sf_r15 = spbase + USPACE - PAGE_SIZE;/* current stack pointer */
	sf->sf_r7_bank = sf->sf_r15;	/* stack top */
	sf->sf_r6_bank = (vaddr_t)tf;	/* current frame pointer */
	/* when switch to me, jump to proc_trampoline */
	sf->sf_pr = (int)proc_trampoline;
	/*
	 * Enable interrupt when switch frame is restored, since
	 * kernel thread begin to run without restoring trapframe.
	 */
	sf->sf_sr = PSL_MD;		/* kernel mode, interrupt enable */

#ifdef SH4
	if (CPU_IS_SH4) {
		/*
		 * Propagate floating point registers to the new process
		 * (they are not in the trapframe).
		 */
		if (p1 == curproc)
			fpu_save(&p1->p_md.md_pcb->pcb_fp);
		bcopy(&p1->p_md.md_pcb->pcb_fp, &pcb->pcb_fp,
		    sizeof(struct fpreg));
	}
#endif
}
/*
 * act_machine_get_state:
 *	Retrieve the machine state of a thread activation in the
 *	requested flavor.  Supports the flavor list, the PPC general
 *	register state (copied from the saved user registers), the
 *	exception state, and the float state (after flushing any live
 *	FP registers via fpu_save()).  Returns KERN_INVALID_ARGUMENT
 *	when *count is too small or the flavor is unknown; on success
 *	*count is set to the flavor's count and KERN_SUCCESS returned.
 */
kern_return_t
act_machine_get_state(
		      thread_act_t           thr_act,
		      thread_flavor_t        flavor,
		      thread_state_t         tstate,
		      mach_msg_type_number_t *count)
{
#if	MACH_ASSERT
	if (watchacts & WA_STATE)
		printf("act_%x act_machine_get_state(thr_act=%x,flav=%x,st=%x,cnt@%x=%x)\n",
		       current_act(), thr_act, flavor, tstate,
		       count, (count ? *count : 0));
#endif	/* MACH_ASSERT */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 3)
			return (KERN_INVALID_ARGUMENT);

		tstate[0] = PPC_THREAD_STATE;
		tstate[1] = PPC_FLOAT_STATE;
		tstate[2] = PPC_EXCEPTION_STATE;
		*count = 3;

		return KERN_SUCCESS;

	case PPC_THREAD_STATE:
	{
		register struct ppc_thread_state *state;
		register struct ppc_saved_state *saved_state =
		    USER_REGS(thr_act);

		if (*count < PPC_THREAD_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		state = (struct ppc_thread_state *) tstate;

		/* Copy the saved GPRs field by field. */
		state->r0	= saved_state->r0;
		state->r1	= saved_state->r1;
		state->r2	= saved_state->r2;
		state->r3	= saved_state->r3;
		state->r4	= saved_state->r4;
		state->r5	= saved_state->r5;
		state->r6	= saved_state->r6;
		state->r7	= saved_state->r7;
		state->r8	= saved_state->r8;
		state->r9	= saved_state->r9;
		state->r10	= saved_state->r10;
		state->r11	= saved_state->r11;
		state->r12	= saved_state->r12;
		state->r13	= saved_state->r13;
		state->r14	= saved_state->r14;
		state->r15	= saved_state->r15;
		state->r16	= saved_state->r16;
		state->r17	= saved_state->r17;
		state->r18	= saved_state->r18;
		state->r19	= saved_state->r19;
		state->r20	= saved_state->r20;
		state->r21	= saved_state->r21;
		state->r22	= saved_state->r22;
		state->r23	= saved_state->r23;
		state->r24	= saved_state->r24;
		state->r25	= saved_state->r25;
		state->r26	= saved_state->r26;
		state->r27	= saved_state->r27;
		state->r28	= saved_state->r28;
		state->r29	= saved_state->r29;
		state->r30	= saved_state->r30;
		state->r31	= saved_state->r31;

		state->cr	= saved_state->cr;
		state->xer	= saved_state->xer;
		state->lr	= saved_state->lr;
		state->ctr	= saved_state->ctr;
		state->srr0	= saved_state->srr0;

		/*
		 * Only export the meaningful bits of the msr
		 */
		state->srr1 = MSR_REMOVE_SYSCALL_MARK(saved_state->srr1);

		state->mq	= saved_state->mq;	/* MQ register (601 only) */

		*count = PPC_THREAD_STATE_COUNT;
		return KERN_SUCCESS;
	}

	case PPC_EXCEPTION_STATE:
	{
		register struct ppc_exception_state *state;
		register struct ppc_exception_state *pcb_state =
		    &thr_act->mact.pcb->es;

		if (*count < PPC_EXCEPTION_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		state = (struct ppc_exception_state *) tstate;
		bcopy((char *)pcb_state, (char *)state, sizeof(*state));

		*count = PPC_EXCEPTION_STATE_COUNT;
		return KERN_SUCCESS;
	}

	case PPC_FLOAT_STATE:
	{
		register struct ppc_float_state *state;
		register struct ppc_float_state *float_state =
		    &thr_act->mact.pcb->fs;

		if (*count < PPC_FLOAT_STATE_COUNT)
			return KERN_INVALID_ARGUMENT;

		/* Flush live FP registers into the PCB before copying. */
		fpu_save();

		state = (struct ppc_float_state *) tstate;

		bcopy((char *)float_state, (char *)state, sizeof(*state));

		*count = PPC_FLOAT_STATE_COUNT;
		return KERN_SUCCESS;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
}