/*
 * Copy the floating-point register state of thread 'td' into the
 * caller-supplied 'fpregs' (used by ptrace(2)/core-dump consumers).
 *
 * Fixes vs. the previous revision: the FPU save/copy was unconditional,
 * but on soft-float kernels (CPU_HAVEFPU not defined) there is no FPU
 * state to flush and the trapframe FP area is not meaningful.  Guard the
 * hardware path the same way the other fill_fpregs() variant in this file
 * does, and zero the destination first so the buffer never carries stale
 * kernel memory out when the copy is skipped.
 */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	/* Always hand back a fully-initialized buffer. */
	memset(fpregs, 0, sizeof(*fpregs));
#if defined(CPU_HAVEFPU)
	/*
	 * If this thread currently owns the hardware FPU, its in-memory
	 * copy in the trapframe is stale; flush the live registers first.
	 */
	if (td == PCPU_GET(fpcurthread))
		MipsSaveCurFPState(td);
	memcpy(fpregs, &td->td_frame->f0, sizeof(struct fpreg));
#endif
	return (0);
}
/*
 * Export thread 'td's floating-point register file into 'fpregs'.
 * On FPU-less (soft-float) configurations this degenerates to a
 * successful no-op.
 */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#if defined(CPU_HAVEFPU)
	/*
	 * While a thread owns the hardware FPU its saved state is stale;
	 * write the live registers back before copying them out.
	 */
	if (PCPU_GET(fpcurthread) == td)
		MipsSaveCurFPState(td);
	memcpy(fpregs, &td->td_frame->f0, sizeof(*fpregs));
#endif
	return 0;
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 *
 * This is the 32-bit (freebsd32 compat) flavour: register state is
 * narrowed through struct reg32/fpreg32 and the sigframe uses 32-bit
 * pointers.  Called with the proc lock and ps_mtx held; both are
 * dropped around the copyout() and re-taken before returning.
 */
static void
freebsd32_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct fpreg32 fpregs;
	struct reg32 regs;
	struct sigacts *psp;
	struct sigframe32 sf, *sfp;
	int sig;
	int oonstack;
	unsigned i;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	/* Snapshot the trapframe in 32-bit layout for the mcontext. */
	fill_regs32(td, &regs);
	oonstack = sigonstack(td->td_frame->sp);

	/* save user context */
	bzero(&sf, sizeof sf);
	sf.sf_uc.uc_sigmask = *mask;
	/* Narrow the sigaltstack description to 32-bit fields. */
	sf.sf_uc.uc_stack.ss_sp = (int32_t)(intptr_t)td->td_sigstk.ss_sp;
	sf.sf_uc.uc_stack.ss_size = td->td_sigstk.ss_size;
	sf.sf_uc.uc_stack.ss_flags = td->td_sigstk.ss_flags;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs.r_regs[PC];
	sf.sf_uc.uc_mcontext.mullo = regs.r_regs[MULLO];
	sf.sf_uc.uc_mcontext.mulhi = regs.r_regs[MULHI];
	sf.sf_uc.uc_mcontext.mc_tls = (int32_t)(intptr_t)td->td_md.md_tls;
	/* Slot 0 ($zero) is repurposed to tag a valid context. */
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	for (i = 1; i < 32; i++)
		sf.sf_uc.uc_mcontext.mc_regs[i] = regs.r_regs[i];
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		fill_fpregs32(td, &fpregs);
		/* 32 FP registers plus the FP control/status register. */
		for (i = 0; i < 33; i++)
			sf.sf_uc.uc_mcontext.mc_fpregs[i] =
			    fpregs.r_regs[i];
	}

	/*
	 * Allocate and validate space for the signal handler context.
	 * The frame is placed on the alternate stack when one is
	 * configured, requested for this signal, and not already active;
	 * either way it is aligned down to an 8-byte boundary.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe32 *)(((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe32)) &
		    ~(sizeof(__int64_t) - 1));
	} else
		sfp = (struct sigframe32 *)((vm_offset_t)(td->td_frame->sp -
		    sizeof(struct sigframe32)) & ~(sizeof(__int64_t) - 1));

	/* Build the argument list for the signal handler. */
	td->td_frame->a0 = sig;
	td->td_frame->a2 = (register_t)(intptr_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		td->td_frame->a1 = (register_t)(intptr_t)&sfp->sf_si;
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = td->td_frame->badvaddr;
	} else {
		/* Old FreeBSD-style arguments. */
		td->td_frame->a1 = ksi->ksi_code;
		td->td_frame->a3 = td->td_frame->badvaddr;
		/* sf.sf_ahu.sf_handler = catcher; */
	}

	/* Locks must be dropped across the (possibly faulting) copyout. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe32)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Redirect the thread to the handler; t9 must match pc for PIC. */
	td->td_frame->pc = (register_t)(intptr_t)catcher;
	td->td_frame->t9 = (register_t)(intptr_t)catcher;
	td->td_frame->sp = (register_t)(intptr_t)sfp;
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	td->td_frame->ra = (register_t)(intptr_t)p->p_psstrings -
	    *(p->p_sysent->sv_szsigcode);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child
 * ready to run and return to user mode.
 *
 * td1 is the forking thread, td2 the new child thread; the child is
 * arranged to resume in fork_trampoline(), which calls fork_return()
 * (PCB_REG_S0) with td2 (PCB_REG_S1) and its trapframe (PCB_REG_S2).
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	register struct proc *p1;
	struct pcb *pcb2;

	p1 = td1->td_proc;
	/* Nothing to do unless an actual new process is being created. */
	if ((flags & RFPROC) == 0)
		return;
	/* It is assumed that the vm_thread_alloc called
	 * cpu_thread_alloc() before cpu_fork is called.
	 */

	/* Point the pcb to the top of the stack */
	pcb2 = td2->td_pcb;

	/* Copy p1's pcb, note that in this case
	 * our pcb also includes the td_frame being copied
	 * too. The older mips2 code did an additional copy
	 * of the td_frame, for us that's not needed any
	 * longer (this copy does them both)
	 */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point mdproc and then copy over td1's contents
	 * md_proc is empty for MIPS
	 */
	/* Only the FPU-used flag is inherited by the child. */
	td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;

	/*
	 * Set up return-value registers as fork() libc stub expects.
	 */
	/* Child sees fork() return 0 (v0=0, a3=0 means "no error"). */
	td2->td_frame->v0 = 0;
	td2->td_frame->v1 = 1;
	td2->td_frame->a3 = 0;

	/* Flush live FPU state so the pcb copy above stays coherent. */
	if (td1 == PCPU_GET(fpcurthread))
		MipsSaveCurFPState(td1);

	pcb2->pcb_context[PCB_REG_RA] =
	    (register_t)(intptr_t)fork_trampoline;
	/* Make sp 64-bit aligned */
	pcb2->pcb_context[PCB_REG_SP] =
	    (register_t)(((vm_offset_t)td2->td_pcb &
	    ~(sizeof(__int64_t) - 1)) - CALLFRAME_SIZ);
	pcb2->pcb_context[PCB_REG_S0] = (register_t)(intptr_t)fork_return;
	pcb2->pcb_context[PCB_REG_S1] = (register_t)(intptr_t)td2;
	pcb2->pcb_context[PCB_REG_S2] = (register_t)(intptr_t)td2->td_frame;
	/* Inherit only the address-mode and interrupt-mask SR bits. */
	pcb2->pcb_context[PCB_REG_SR] = mips_rd_status() &
	    (MIPS_SR_KX | MIPS_SR_UX | MIPS_SR_INT_MASK);
	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * and/or bits in other standard MIPS registers (if CPU-Specific)
	 * that are needed.
	 */

	td2->td_md.md_tls = td1->td_md.md_tls;
	td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
	/* Child starts with one spinlock held (released in fork_exit). */
	td2->td_md.md_spinlock_count = 1;
#ifdef CPU_CNMIPS
	/* Octeon COP2 (crypto) state: save owner's live context, then
	 * duplicate both kernel and userland contexts for the child. */
	if (td1->td_md.md_flags & MDTD_COP2USED) {
		if (td1->td_md.md_cop2owner == COP2_OWNER_USERLAND) {
			if (td1->td_md.md_ucop2)
				octeon_cop2_save(td1->td_md.md_ucop2);
			else
				panic("cpu_fork: ucop2 is NULL but COP2 is enabled");
		} else {
			if (td1->td_md.md_cop2)
				octeon_cop2_save(td1->td_md.md_cop2);
			else
				panic("cpu_fork: cop2 is NULL but COP2 is enabled");
		}
	}

	if (td1->td_md.md_cop2) {
		td2->td_md.md_cop2 = octeon_cop2_alloc_ctx();
		memcpy(td2->td_md.md_cop2, td1->td_md.md_cop2,
		    sizeof(*td1->td_md.md_cop2));
	}
	if (td1->td_md.md_ucop2) {
		td2->td_md.md_ucop2 = octeon_cop2_alloc_ctx();
		memcpy(td2->td_md.md_ucop2, td1->td_md.md_ucop2,
		    sizeof(*td1->td_md.md_ucop2));
	}
	td2->td_md.md_cop2owner = td1->td_md.md_cop2owner;
	pcb2->pcb_context[PCB_REG_SR] |= MIPS_SR_PX | MIPS_SR_UX |
	    MIPS_SR_KX | MIPS_SR_SX;
	/* Clear COP2 bits for userland & kernel */
	/* COP2 access re-faults lazily so ownership can be tracked. */
	td2->td_frame->sr &= ~MIPS_SR_COP_2_BIT;
	pcb2->pcb_context[PCB_REG_SR] &= ~MIPS_SR_COP_2_BIT;
#endif
}
/*
 * The CheriABI version of sendsig(9) largely borrows from the MIPS version,
 * and it is important to keep them in sync.  It differs primarily in that it
 * must also be aware of user stack-handling ABIs, so is also sensitive to our
 * (fluctuating) design choices in how $stc and $sp interact.  The current
 * design uses ($stc + $sp) for stack-relative references, so early on we have
 * to calculate a 'relocated' version of $sp that we can then use for
 * MIPS-style access.
 *
 * This code, as with the CHERI-aware MIPS code, makes a privilege
 * determination in order to decide whether to trust the stack exposed by the
 * user code for the purposes of signal handling.  We must use the alternative
 * stack if there is any indication that using the user thread's stack state
 * might violate the userspace compartmentalisation model.
 */
static void
cheriabi_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *regs;
	struct sigacts *psp;
	struct sigframe_c sf, *sfp;
	uintptr_t stackbase;
	vm_offset_t sp;
	int cheri_is_sandboxed;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;

	/*
	 * In CheriABI, $sp is $stc relative, so calculate a relocation base
	 * that must be combined with regs->sp from this point onwards.
	 * Unfortunately, we won't retain bounds and permissions information
	 * (as is the case elsewhere in CheriABI).  While 'stackbase'
	 * suggests that $stc's offset isn't included, in practice it will be,
	 * although we may reasonably assume that it will be zero.
	 *
	 * If it turns out we will be delivering to the alternative signal
	 * stack, we'll recalculate stackbase later.
	 */
	/* Load the saved $stc into a capability temp, then take its pointer. */
	CHERI_CLC(CHERI_CR_CTEMP0, CHERI_CR_KDC,
	    &td->td_pcb->pcb_regs.stc, 0);
	CHERI_CTOPTR(stackbase, CHERI_CR_CTEMP0, CHERI_CR_KDC);
	oonstack = sigonstack(stackbase + regs->sp);

	/*
	 * CHERI affects signal delivery in the following ways:
	 *
	 * (1) Additional capability-coprocessor state is exposed via
	 *     extensions to the context frame placed on the stack.
	 *
	 * (2) If the user $pcc doesn't include CHERI_PERM_SYSCALL, then we
	 *     consider user state to be 'sandboxed' and therefore to require
	 *     special delivery handling which includes a domain-switch to the
	 *     thread's context-switch domain.  (This is done by
	 *     cheri_sendsig()).
	 *
	 * (3) If an alternative signal stack is not defined, and we are in a
	 *     'sandboxed' state, then we have two choices: (a) if the signal
	 *     is of type SA_SANDBOX_UNWIND, we will automatically unwind the
	 *     trusted stack by one frame; (b) otherwise, we will terminate
	 *     the process unconditionally.
	 */
	cheri_is_sandboxed = cheri_signal_sandboxed(td);

	/*
	 * We provide the ability to drop into the debugger in two different
	 * circumstances: (1) if the code running is sandboxed; and (2) if the
	 * fault is a CHERI protection fault.  Handle both here for the
	 * non-unwind case.  Do this before we rewrite any general-purpose or
	 * capability register state for the thread.
	 */
#if DDB
	if (cheri_is_sandboxed && security_cheri_debugger_on_sandbox_signal)
		kdb_enter(KDB_WHY_CHERI, "Signal delivery to CHERI sandbox");
	else if (sig == SIGPROT && security_cheri_debugger_on_sigprot)
		kdb_enter(KDB_WHY_CHERI,
		    "SIGPROT delivered outside sandbox");
#endif

	/*
	 * If a thread is running sandboxed, we can't rely on $sp which may
	 * not point at a valid stack in the ambient context, or even be
	 * maliciously manipulated.  We must therefore always use the
	 * alternative stack.  We are also therefore unable to tell whether we
	 * are on the alternative stack, so must clear 'oonstack' here.
	 *
	 * XXXRW: This requires significant further thinking; however, the net
	 * upshot is that it is not a good idea to do an object-capability
	 * invoke() from a signal handler, as with so many other things in
	 * life.
	 */
	if (cheri_is_sandboxed != 0)
		oonstack = 0;

	/* save user context */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
#if 0
	/*
	 * XXX-BD: stack_t type differs and we can't just fake a capabilty.
	 * We don't restore the value so what purpose does it serve?
	 */
	sf.sf_uc.uc_stack = td->td_sigstk;
#endif
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
	sf.sf_uc.uc_mcontext.mullo = regs->mullo;
	sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
	/* TLS is a capability under CheriABI; tag-preserving copy. */
	cheri_capability_copy(&sf.sf_uc.uc_mcontext.mc_tls,
	    &td->td_md.md_tls_cap);
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	/* Bulk-copy GPRs starting at 'ast' in trapframe order. */
	bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
	    sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
#if defined(CPU_HAVEFPU)
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		bcopy((void *)&td->td_frame->f0,
		    (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
		    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	}
#endif
	/* XXXRW: sf.sf_uc.uc_mcontext.sr seems never to be set? */
	sf.sf_uc.uc_mcontext.cause = regs->cause;
	/* Capability registers are saved tag-preserving into the frame. */
	cheri_trapframe_to_cheriframe(&td->td_pcb->pcb_regs,
	    &sf.sf_uc.uc_mcontext.mc_cheriframe);

	/*
	 * Allocate and validate space for the signal handler context.  For
	 * CheriABI purposes, 'sp' from this point forward is relocated
	 * relative to any pertinent stack capability.  For an alternative
	 * signal context, we need to recalculate stackbase for later use in
	 * calculating a new $sp for the signal-handling context.
	 *
	 * XXXRW: It seems like it would be nice to both the regular and
	 * alternative stack calculations in the same place.  However, we need
	 * oonstack sooner.  We should clean this up later.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		stackbase = (vm_offset_t)td->td_sigstk.ss_sp;
		sp = (vm_offset_t)(stackbase + td->td_sigstk.ss_size);
	} else {
		/*
		 * Signals delivered when a CHERI sandbox is present must be
		 * delivered on the alternative stack rather than a local one.
		 * If an alternative stack isn't present, then terminate or
		 * risk leaking capabilities (and control) to the sandbox (or
		 * just crashing the sandbox).
		 */
		if (cheri_is_sandboxed) {
			mtx_unlock(&psp->ps_mtx);
			printf("pid %d, tid %d: signal in sandbox without "
			    "alternative stack defined\n", td->td_proc->p_pid,
			    td->td_tid);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
		sp = (vm_offset_t)(stackbase + regs->sp);
	}
	sp -= sizeof(struct sigframe_c);
	/* For CHERI, keep the stack pointer capability aligned. */
	sp &= ~(CHERICAP_SIZE - 1);
	sfp = (void *)sp;

	/* Build the argument list for the signal handler. */
	regs->a0 = sig;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/*
		 * Signal handler installed with SA_SIGINFO.
		 *
		 * XXXRW: We would ideally synthesise these from the
		 * user-originated stack capability, rather than $kdc, to be
		 * on the safe side.
		 */
		cheri_capability_set(&regs->c3, CHERI_CAP_USER_DATA_PERMS,
		    (void *)(intptr_t)&sfp->sf_si, sizeof(sfp->sf_si), 0);
		cheri_capability_set(&regs->c4, CHERI_CAP_USER_DATA_PERMS,
		    (void *)(intptr_t)&sfp->sf_uc, sizeof(sfp->sf_uc), 0);
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		/*
		 * Write out badvaddr, but don't create a valid capability
		 * since that might allow privilege amplification.
		 *
		 * XXX-BD: This probably isn't the right method.
		 * XXX-BD: Do we want to set base or offset?
		 *
		 * XXXRW: I think there's some argument that anything
		 * receiving this signal is fairly privileged.  But we could
		 * generate a $ddc-relative (or $pcc-relative) capability, if
		 * possible.  (Using versions if $ddc and $pcc for the
		 * signal-handling context rather than that which caused the
		 * signal).  I'd be tempted to deliver badvaddr as the offset
		 * of that capability.  If badvaddr is not in range, then we
		 * should just deliver an untagged NULL-derived version
		 * (perhaps)?
		 */
		*((uintptr_t *)&sf.sf_si.si_addr) =
		    (uintptr_t)(void *)regs->badvaddr;
	}
	/*
	 * XXX: No support for undocumented arguments to old style handlers.
	 */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	/* copyoutcap() preserves capability tags, unlike plain copyout(). */
	if (copyoutcap(&sf, (void *)sfp, sizeof(sf)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		printf("pid %d, tid %d: could not copy out sigframe\n",
		    td->td_proc->p_pid, td->td_tid);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Re-acquire process locks necessary to access suitable pcb fields.
	 * However, arguably, these operations should be atomic with the
	 * initial inspection of 'psp'.
	 */
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);

	/*
	 * Install CHERI signal-delivery register state for handler to run
	 * in.  As we don't install this in the CHERI frame on the user stack,
	 * it will be (generally) be removed automatically on sigreturn().
	 */
	/* XXX-BD: this isn't quite right */
	cheri_sendsig(td);

	/*
	 * Note that $sp must be installed relative to $stc, so re-subtract
	 * the stack base here.
	 */
	regs->pc = (register_t)(intptr_t)catcher;
	regs->sp = (register_t)((intptr_t)sfp - stackbase);
	/* c12: handler code capability; c17: sigcode return capability. */
	cheri_capability_copy(&regs->c12, &psp->ps_sigcap[_SIG_IDX(sig)]);
	cheri_capability_copy(&regs->c17,
	    &td->td_pcb->pcb_cherisignal.csig_sigcode);
}
/*
 * Finish a fork operation, with process p2 nearly set up.
 * Copy and update the pcb, set up the stack so that the child
 * ready to run and return to user mode.
 *
 * Older variant: pcb context registers are accessed through
 * pcb_context.val[] and the child runs with SR_INT_MASK only.
 */
void
cpu_fork(register struct thread *td1, register struct proc *p2,
    struct thread *td2, int flags)
{
	register struct proc *p1;
	struct pcb *pcb2;

	p1 = td1->td_proc;
	/* Nothing to do unless an actual new process is being created. */
	if ((flags & RFPROC) == 0)
		return;
	/* It is assumed that the vm_thread_alloc called
	 * cpu_thread_alloc() before cpu_fork is called.
	 */

	/* Point the pcb to the top of the stack */
	pcb2 = td2->td_pcb;

	/* Copy p1's pcb, note that in this case
	 * our pcb also includes the td_frame being copied
	 * too. The older mips2 code did an additional copy
	 * of the td_frame, for us that's not needed any
	 * longer (this copy does them both)
	 */
	bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));

	/* Point mdproc and then copy over td1's contents
	 * md_proc is empty for MIPS
	 */
	/* Only the FPU-used flag is inherited by the child. */
	td2->td_md.md_flags = td1->td_md.md_flags & MDTD_FPUSED;

	/*
	 * Set up return-value registers as fork() libc stub expects.
	 */
	/* Child sees fork() return 0 (v0=0, a3=0 means "no error"). */
	td2->td_frame->v0 = 0;
	td2->td_frame->v1 = 1;
	td2->td_frame->a3 = 0;

	/* Flush live FPU state so the pcb copy above stays coherent. */
	if (td1 == PCPU_GET(fpcurthread))
		MipsSaveCurFPState(td1);

	/* Child resumes in fork_trampoline(); s0-s2 carry its arguments. */
	pcb2->pcb_context.val[PCB_REG_RA] = (register_t)fork_trampoline;
	/* Make sp 64-bit aligned */
	pcb2->pcb_context.val[PCB_REG_SP] =
	    (register_t)(((vm_offset_t)td2->td_pcb &
	    ~(sizeof(__int64_t) - 1)) - STAND_FRAME_SIZE);
	pcb2->pcb_context.val[PCB_REG_S0] = (register_t)fork_return;
	pcb2->pcb_context.val[PCB_REG_S1] = (register_t)td2;
	pcb2->pcb_context.val[PCB_REG_S2] = (register_t)td2->td_frame;
	pcb2->pcb_context.val[PCB_REG_SR] = SR_INT_MASK;
	/*
	 * FREEBSD_DEVELOPERS_FIXME:
	 * Setup any other CPU-Specific registers (Not MIPS Standard)
	 * and/or bits in other standard MIPS registers (if CPU-Specific)
	 * that are needed.
	 */

	td2->td_md.md_saved_intr = MIPS_SR_INT_IE;
	/* Child starts with one spinlock held (released in fork_exit). */
	td2->td_md.md_spinlock_count = 1;
#ifdef TARGET_OCTEON
	/* Octeon needs COP2 access and 64-bit addressing modes enabled. */
	pcb2->pcb_context.val[PCB_REG_SR] |= MIPS_SR_COP_2_BIT |
	    MIPS32_SR_PX | MIPS_SR_UX | MIPS_SR_KX | MIPS_SR_SX;
#endif
}
/*
 * CheriABI signal delivery (older variant, capability registers live in
 * the pcb cheriframe rather than the trapframe).  Builds a sigframe_c on
 * the user stack (or the alternative stack when sandboxed/requested) and
 * redirects the thread to 'catcher'.  Called with the proc lock and
 * ps_mtx held; both are dropped around the copyoutcap() and re-taken.
 *
 * Bug fixed: the on-stack sigframe was cleared with
 * bzero(&sf, sizeof(struct sigframe)), but 'sf' is a struct sigframe_c,
 * which is larger (it embeds the CHERI capability frame).  The tail of
 * the structure was therefore copied out to userspace uninitialized,
 * disclosing kernel stack memory.  Zero the whole object instead.
 */
static void
cheriabi_sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *regs;
	struct cheri_frame *capreg;
	struct sigacts *psp;
	struct sigframe_c sf, *sfp;
	vm_offset_t sp;
	int cheri_is_sandboxed;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	capreg = &td->td_pcb->pcb_cheriframe;
	oonstack = sigonstack(regs->sp);

	/*
	 * CHERI affects signal delivery in the following ways:
	 *
	 * (1) Additional capability-coprocessor state is exposed via
	 *     extensions to the context frame placed on the stack.
	 *
	 * (2) If the user $pcc doesn't include CHERI_PERM_SYSCALL, then we
	 *     consider user state to be 'sandboxed' and therefore to require
	 *     special delivery handling which includes a domain-switch to the
	 *     thread's context-switch domain.  (This is done by
	 *     cheri_sendsig()).
	 *
	 * (3) If an alternative signal stack is not defined, and we are in a
	 *     'sandboxed' state, then we have two choices: (a) if the signal
	 *     is of type SA_SANDBOX_UNWIND, we will automatically unwind the
	 *     trusted stack by one frame; (b) otherwise, we will terminate
	 *     the process unconditionally.
	 */
	cheri_is_sandboxed = cheri_signal_sandboxed(td);

	/*
	 * We provide the ability to drop into the sandbox in two different
	 * circumstances: (1) if the code running is sandboxed; and (2) if the
	 * fault is a CHERI protection fault.  Handle both here for the
	 * non-unwind case.  Do this before we rewrite any general-purpose or
	 * capability register state for the thread.
	 */
#if DDB
	if (cheri_is_sandboxed && security_cheri_debugger_on_sandbox_signal)
		kdb_enter(KDB_WHY_CHERI, "Signal delivery to CHERI sandbox");
	else if (sig == SIGPROT && security_cheri_debugger_on_sigprot)
		kdb_enter(KDB_WHY_CHERI,
		    "SIGPROT delivered outside sandbox");
#endif

	/*
	 * If a thread is running sandboxed, we can't rely on $sp which may
	 * not point at a valid stack in the ambient context, or even be
	 * maliciously manipulated.  We must therefore always use the
	 * alternative stack.  We are also therefore unable to tell whether we
	 * are on the alternative stack, so must clear 'oonstack' here.
	 *
	 * XXXRW: This requires significant further thinking; however, the net
	 * upshot is that it is not a good idea to do an object-capability
	 * invoke() from a signal handler, as with so many other things in
	 * life.
	 */
	if (cheri_is_sandboxed != 0)
		oonstack = 0;

	/* save user context */
	/*
	 * Zero the full sigframe_c (not struct sigframe, which is smaller)
	 * so no uninitialized kernel stack bytes reach userspace.
	 */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
#if 0
	/*
	 * XXX-BD: stack_t type differs and we can't just fake a capabilty.
	 * We don't restore the value so what purpose does it serve?
	 */
	sf.sf_uc.uc_stack = td->td_sigstk;
#endif
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
	sf.sf_uc.uc_mcontext.mullo = regs->mullo;
	sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
#if 0
	/* XXX-BD: what actually makes sense here? */
	sf.sf_uc.uc_mcontext.mc_tls = td->td_md.md_tls;
#endif
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	/* Bulk-copy GPRs starting at 'ast' in trapframe order. */
	bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
	    sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		bcopy((void *)&td->td_frame->f0,
		    (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
		    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	}
	/* XXXRW: sf.sf_uc.uc_mcontext.sr seems never to be set? */
	sf.sf_uc.uc_mcontext.cause = regs->cause;
	/* Tag-preserving copy of the capability register frame. */
	cheri_memcpy(&sf.sf_uc.uc_mcontext.mc_cheriframe,
	    &td->td_pcb->pcb_cheriframe,
	    sizeof(struct cheri_frame));

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (vm_offset_t)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
		/*
		 * Signals delivered when a CHERI sandbox is present must be
		 * delivered on the alternative stack rather than a local one.
		 * If an alternative stack isn't present, then terminate or
		 * risk leaking capabilities (and control) to the sandbox (or
		 * just crashing the sandbox).
		 */
		if (cheri_is_sandboxed) {
			mtx_unlock(&psp->ps_mtx);
			printf("pid %d, tid %d: signal in sandbox without "
			    "alternative stack defined\n", td->td_proc->p_pid,
			    td->td_tid);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
		sp = (vm_offset_t)regs->sp;
	}
	sp -= sizeof(struct sigframe_c);
	/* For CHERI, keep the stack pointer capability aligned. */
	sp &= ~(CHERICAP_SIZE - 1);
	sfp = (void *)sp;

	/* Build the argument list for the signal handler. */
	regs->a0 = sig;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		cheri_capability_set(&capreg->cf_c3, CHERI_CAP_USER_DATA_PERMS,
		    CHERI_CAP_USER_DATA_OTYPE, (void *)(intptr_t)&sfp->sf_si,
		    sizeof(sfp->sf_si), 0);
		cheri_capability_set(&capreg->cf_c4, CHERI_CAP_USER_DATA_PERMS,
		    CHERI_CAP_USER_DATA_OTYPE, (void *)(intptr_t)&sfp->sf_uc,
		    sizeof(sfp->sf_uc), 0);
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		/*
		 * Write out badvaddr, but don't create a valid capability
		 * since that might allow privilege amplification.
		 *
		 * XXX-BD: This probably isn't the right method.
		 * XXX-BD: Do we want to set base or offset?
		 */
		*((uintptr_t *)&sf.sf_si.si_addr) =
		    (uintptr_t)(void *)regs->badvaddr;
	}
	/*
	 * XXX: No support for undocumented arguments to old style handlers.
	 */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	/* copyoutcap() preserves capability tags, unlike plain copyout(). */
	if (copyoutcap(&sf, sfp, sizeof(sf)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	/*
	 * Install CHERI signal-delivery register state for handler to run
	 * in.  As we don't install this in the CHERI frame on the user stack,
	 * it will be (generally) be removed automatically on sigreturn().
	 */
	/* XXX-BD: this isn't quite right */
	cheri_sendsig(td);

	regs->pc = (register_t)(intptr_t)catcher;
	regs->sp = (register_t)(intptr_t)sfp;
	/* c12: handler code capability; c17: sigcode return capability. */
	cheri_capability_copy(&capreg->cf_c12,
	    &psp->ps_sigcap[_SIG_IDX(sig)]);
	cheri_capability_copy(&capreg->cf_c17,
	    &td->td_pcb->pcb_cherisignal.csig_sigcode);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 *
 * Classic native-MIPS variant.  Called with the proc lock and ps_mtx
 * held; both are dropped around the copyout() and re-taken at the end.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *regs;
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->sp);

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
	sf.sf_uc.uc_mcontext.mullo = regs->mullo;
	sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
	sf.sf_uc.uc_mcontext.mc_tls = td->td_md.md_tls;
	/* Slot 0 ($zero) is repurposed to tag a valid context. */
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	/* Bulk-copy GPRs starting at 'ast' in trapframe order. */
	bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
	    sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		bcopy((void *)&td->td_frame->f0,
		    (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
		    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	}

	/*
	 * Allocate and validate space for the signal handler context,
	 * 8-byte aligned, on the alternate stack when configured and
	 * requested for this signal, otherwise below the current sp.
	 */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)((vm_offset_t)(td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe)) &
		    ~(sizeof(__int64_t) - 1));
	} else
		sfp = (struct sigframe *)((vm_offset_t)(regs->sp -
		    sizeof(struct sigframe)) & ~(sizeof(__int64_t) - 1));

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/* Build the argument list for the signal handler. */
	regs->a0 = sig;
	regs->a2 = (register_t)(intptr_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->a1 = (register_t)(intptr_t)&sfp->sf_si;
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = (void*)(intptr_t)regs->badvaddr;
	} else {
		/* Old FreeBSD-style arguments. */
		regs->a1 = ksi->ksi_code;
		regs->a3 = regs->badvaddr;
		/* sf.sf_ahu.sf_handler = catcher; */
	}

	/* Locks must be dropped across the (possibly faulting) copyout. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	/* Redirect the thread to the handler; t9 must match pc for PIC. */
	regs->pc = (register_t)(intptr_t)catcher;
	regs->t9 = (register_t)(intptr_t)catcher;
	regs->sp = (register_t)(intptr_t)sfp;
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	regs->ra = (register_t)(intptr_t)PS_STRINGS -
	    *(p->p_sysent->sv_szsigcode);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 *
 * CPU_CHERI-aware variant: the capability register frame is copied out
 * separately (mc_cp2state) above the sigframe, via a tag-preserving
 * copyoutcap() from a temporarily malloc'd kernel cheriframe.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct proc *p;
	struct thread *td;
	struct trapframe *regs;
#ifdef CPU_CHERI
	struct cheri_frame *cfp;
#endif
	struct sigacts *psp;
	struct sigframe sf, *sfp;
	vm_offset_t sp;
#ifdef CPU_CHERI
	size_t cp2_len;
	int cheri_is_sandboxed;
#endif
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->sp);

#ifdef CPU_CHERI
	/*
	 * CHERI affects signal delivery in the following ways:
	 *
	 * (1) Additional capability-coprocessor state is exposed via
	 *     extensions to the context frame placed on the stack.
	 *
	 * (2) If the user $pcc doesn't include CHERI_PERM_SYSCALL, then we
	 *     consider user state to be 'sandboxed' and therefore to require
	 *     special delivery handling which includes a domain-switch to the
	 *     thread's context-switch domain.  (This is done by
	 *     cheri_sendsig()).
	 *
	 * (3) If an alternative signal stack is not defined, and we are in a
	 *     'sandboxed' state, then we have two choices: (a) if the signal
	 *     is of type SA_SANDBOX_UNWIND, we will automatically unwind the
	 *     trusted stack by one frame; (b) otherwise, we will terminate
	 *     the process unconditionally.
	 */
	cheri_is_sandboxed = cheri_signal_sandboxed(td);

	/*
	 * We provide the ability to drop into the debugger in two different
	 * circumstances: (1) if the code running is sandboxed; and (2) if the
	 * fault is a CHERI protection fault.  Handle both here for the
	 * non-unwind case.  Do this before we rewrite any general-purpose or
	 * capability register state for the thread.
	 */
#if DDB
	if (cheri_is_sandboxed && security_cheri_debugger_on_sandbox_signal)
		kdb_enter(KDB_WHY_CHERI, "Signal delivery to CHERI sandbox");
	else if (sig == SIGPROT && security_cheri_debugger_on_sigprot)
		kdb_enter(KDB_WHY_CHERI,
		    "SIGPROT delivered outside sandbox");
#endif

	/*
	 * If a thread is running sandboxed, we can't rely on $sp which may
	 * not point at a valid stack in the ambient context, or even be
	 * maliciously manipulated.  We must therefore always use the
	 * alternative stack.  We are also therefore unable to tell whether we
	 * are on the alternative stack, so must clear 'oonstack' here.
	 *
	 * XXXRW: This requires significant further thinking; however, the net
	 * upshot is that it is not a good idea to do an object-capability
	 * invoke() from a signal handler, as with so many other things in
	 * life.
	 */
	if (cheri_is_sandboxed != 0)
		oonstack = 0;
#endif

	/* save user context */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_pc = regs->pc;
	sf.sf_uc.uc_mcontext.mullo = regs->mullo;
	sf.sf_uc.uc_mcontext.mulhi = regs->mulhi;
	sf.sf_uc.uc_mcontext.mc_tls = td->td_md.md_tls;
	/* Slot 0 ($zero) is repurposed to tag a valid context. */
	sf.sf_uc.uc_mcontext.mc_regs[0] = UCONTEXT_MAGIC;	/* magic number */
	/* Bulk-copy GPRs starting at 'ast' in trapframe order. */
	bcopy((void *)&regs->ast, (void *)&sf.sf_uc.uc_mcontext.mc_regs[1],
	    sizeof(sf.sf_uc.uc_mcontext.mc_regs) - sizeof(register_t));
	sf.sf_uc.uc_mcontext.mc_fpused = td->td_md.md_flags & MDTD_FPUSED;
#if defined(CPU_HAVEFPU)
	if (sf.sf_uc.uc_mcontext.mc_fpused) {
		/* if FPU has current state, save it first */
		if (td == PCPU_GET(fpcurthread))
			MipsSaveCurFPState(td);
		bcopy((void *)&td->td_frame->f0,
		    (void *)sf.sf_uc.uc_mcontext.mc_fpregs,
		    sizeof(sf.sf_uc.uc_mcontext.mc_fpregs));
	}
#endif
	/* XXXRW: sf.sf_uc.uc_mcontext.sr seems never to be set? */
	sf.sf_uc.uc_mcontext.cause = regs->cause;

	/* Allocate and validate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = (vm_offset_t)((uintptr_t)td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size);
	} else {
#ifdef CPU_CHERI
		/*
		 * Signals delivered when a CHERI sandbox is present must be
		 * delivered on the alternative stack rather than a local one.
		 * If an alternative stack isn't present, then terminate or
		 * risk leaking capabilities (and control) to the sandbox (or
		 * just crashing the sandbox).
		 */
		if (cheri_is_sandboxed) {
			mtx_unlock(&psp->ps_mtx);
			printf("pid %d, tid %d: signal in sandbox without "
			    "alternative stack defined\n", td->td_proc->p_pid,
			    td->td_tid);
			sigexit(td, SIGILL);
			/* NOTREACHED */
		}
#endif
		sp = (vm_offset_t)regs->sp;
	}
#ifdef CPU_CHERI
	/* Reserve capability-aligned space for the CP2 frame above sf. */
	cp2_len = sizeof(*cfp);
	sp -= cp2_len;
	sp &= ~(CHERICAP_SIZE - 1);
	sf.sf_uc.uc_mcontext.mc_cp2state = sp;
	sf.sf_uc.uc_mcontext.mc_cp2state_len = cp2_len;
#endif
	sp -= sizeof(struct sigframe);
#ifdef CPU_CHERI
	/* For CHERI, keep the stack pointer capability aligned. */
	sp &= ~(CHERICAP_SIZE - 1);
#else
	sp &= ~(sizeof(__int64_t) - 1);
#endif
	sfp = (struct sigframe *)sp;

	/* Build the argument list for the signal handler. */
	regs->a0 = sig;
	regs->a2 = (register_t)(intptr_t)&sfp->sf_uc;
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->a1 = (register_t)(intptr_t)&sfp->sf_si;
		/* sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; */

		/* fill siginfo structure */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = ksi->ksi_code;
		sf.sf_si.si_addr = (void*)(intptr_t)regs->badvaddr;
	} else {
		/* Old FreeBSD-style arguments. */
		regs->a1 = ksi->ksi_code;
		regs->a3 = regs->badvaddr;
		/* sf.sf_ahu.sf_handler = catcher; */
	}

	/* Locks must be dropped across the (possibly faulting) copyouts. */
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
#ifdef CPU_CHERI
	/*
	 * The cheriframe is staged in malloc'd memory so copyoutcap() can
	 * carry tags; the on-stack sf cannot hold tagged capabilities.
	 */
	cfp = malloc(sizeof(*cfp), M_TEMP, M_WAITOK);
	cheri_trapframe_to_cheriframe(&td->td_pcb->pcb_regs, cfp);
	if (copyoutcap(cfp,
	    (void *)sf.sf_uc.uc_mcontext.mc_cp2state, cp2_len) != 0) {
		free(cfp, M_TEMP);
		PROC_LOCK(p);
		printf("pid %d, tid %d: could not copy out cheriframe\n",
		    td->td_proc->p_pid, td->td_tid);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}
	free(cfp, M_TEMP);
#endif
	if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		printf("pid %d, tid %d: could not copy out sigframe\n",
		    td->td_proc->p_pid, td->td_tid);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

#ifdef CPU_CHERI
	/*
	 * Install CHERI signal-delivery register state for handler to run
	 * in.  As we don't install this in the CHERI frame on the user stack,
	 * it will be (generally) be removed automatically on sigreturn().
	 */
	cheri_sendsig(td);
#endif
	/* Redirect the thread to the handler; t9 must match pc for PIC. */
	regs->pc = (register_t)(intptr_t)catcher;
	regs->t9 = (register_t)(intptr_t)catcher;
	regs->sp = (register_t)(intptr_t)sfp;
	/*
	 * Signal trampoline code is at base of user stack.
	 */
	regs->ra = (register_t)(intptr_t)PS_STRINGS -
	    *(p->p_sysent->sv_szsigcode);
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}