/*
 * sigreturn: undo the effects of signal delivery.
 *
 * Copies the user-space ucontext (pushed by sendsig) back into the kernel,
 * restores the thread's signal mask, SA_ONSTACK bookkeeping, and machine
 * register state, then returns to the interrupted user code.
 *
 * Returns EJUSTRETURN on success (the restored register state itself is the
 * user-visible result), EINVAL on a malformed context, or the copyin errno.
 *
 * Fix vs. original: removed the unused local `onstack` — the code reads
 * uctx32.uc_onstack directly and never referenced it.
 */
int
sigreturn(struct proc *p, struct sigreturn_args *uap, __unused int *retval)
{
	void *state;
	int error, flavor;
	thread_t thread;
	struct uthread *ut;
	struct mcontext mctx32;
	struct user_ucontext32 uctx32;
	mach_msg_type_number_t state_count;

	thread = current_thread();
	ut = get_bsdthread_info(thread);

	/*
	 * Retrieve the user context that contains our machine context.
	 * NOTE: copyin returns an errno, not a kern_return_t; the comparison
	 * works only because KERN_SUCCESS == 0.
	 */
	if ((error = copyin(uap->uctx, (void *)&uctx32, sizeof(struct user_ucontext32))) != KERN_SUCCESS)
		return (error);

	/*
	 * Validate that our machine context is the right size.  Rejecting a
	 * wrong-sized context keeps the second copyin below within bounds.
	 */
	if (uctx32.uc_mcsize != UC_FLAVOR_SIZE)
		return (EINVAL);

	/*
	 * Populate our machine context info that we need to restore.
	 */
	if ((error = copyin(uctx32.uc_mcontext, (void *)&mctx32, UC_FLAVOR_SIZE)) != KERN_SUCCESS)
		return (error);

	/*
	 * Restore the signal mask.  Masking with ~sigcantmask guarantees the
	 * user cannot block SIGKILL/SIGSTOP via a forged context.
	 */
	ut->uu_sigmask = uctx32.uc_sigmask & ~sigcantmask;

	/*
	 * Restore other signal info: track whether the handler ran on the
	 * alternate signal stack.
	 */
	if ((uctx32.uc_onstack & 0x01))
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	else
		ut->uu_sigstk.ss_flags &= ~SA_ONSTACK;

	/* If unmasking exposed pending signals, arrange for delivery via AST. */
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(thread);

	/*
	 * Restore the states from our machine context.
	 * NOTE: we don't really need to check on infostyle since state restoring
	 * for UC_TRAD and UC_FLAVOR is identical on this architecture.
	 */
	flavor = ARM_THREAD_STATE;
	state = (void *)&mctx32.ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		return (EINVAL);

	flavor = ARM_VFP_STATE;
	state = (void *)&mctx32.fs;
	state_count = ARM_VFP_STATE_COUNT;
	if (thread_setstatus(thread, flavor, (thread_state_t)state, state_count) != KERN_SUCCESS)
		return (EINVAL);

	return (EJUSTRETURN);
}
/* ARGSUSED */
/*
 * sigreturn: undo the effects of signal delivery (32/64-bit-data variant).
 *
 * Copies the user ucontext back in via per-width helpers, restores signal
 * mask / SA_ONSTACK state, validates the caller-supplied sigreturn token
 * against the per-process secret, and finally restores the machine register
 * state.  Returns EJUSTRETURN on success, EINVAL on token mismatch (when
 * validation is enforced), or the helpers' errno.
 */
int
sigreturn(
	struct proc * p, struct sigreturn_args * uap, __unused int *retval)
{
	/* 32- and 64-bit contexts share storage; only one member is live per call. */
	union {
		user_ucontext32_t uc32;
#if defined(__arm64__)
		user_ucontext64_t uc64;
#endif
	} uctx;

	union {
		mcontext32_t mc32;
#if defined(__arm64__)
		mcontext64_t mc64;
#endif
	} mctx;

	struct sigacts *ps = p->p_sigacts;
	int error, sigmask = 0, onstack = 0;
	thread_t th_act;
	struct uthread *ut;
	uint32_t sigreturn_validation;
	user_addr_t token_uctx;
	kern_return_t kr;

	th_act = current_thread();
	ut = (struct uthread *) get_bsdthread_info(th_act);

	/*
	 * Copy in the ucontext and mcontext with the width matching the
	 * process's user data model; extract onstack/sigmask for the common
	 * restore path below.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		error = sigreturn_copyin_ctx64(&uctx.uc64, &mctx.mc64, uap->uctx);
		if (error != 0) {
			return error;
		}
		onstack = uctx.uc64.uc_onstack;
		sigmask = uctx.uc64.uc_sigmask;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		error = sigreturn_copyin_ctx32(&uctx.uc32, &mctx.mc32, uap->uctx);
		if (error != 0) {
			return error;
		}
		onstack = uctx.uc32.uc_onstack;
		sigmask = uctx.uc32.uc_sigmask;
	}

	/* Note: `01` is octal, i.e. testing the low bit of uc_onstack. */
	if ((onstack & 01))
		p->p_sigacts->ps_sigstk.ss_flags |= SA_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SA_ONSTACK;

	/* ~sigcantmask keeps SIGKILL/SIGSTOP unblockable even via a forged context. */
	ut->uu_sigmask = sigmask & ~sigcantmask;
	/* If unmasking exposed pending signals, arrange for delivery via AST. */
	if (ut->uu_siglist & ~ut->uu_sigmask)
		signal_setast(current_thread());

	/*
	 * Token check: the expected token is the (user-visible) ucontext
	 * pointer XORed with a per-process secret, so a forged sigreturn
	 * cannot guess it.  Relaxed load is sufficient — the flag is a
	 * monotonic per-process setting, not a synchronization point.
	 */
	sigreturn_validation = atomic_load_explicit(
		&ps->ps_sigreturn_validation, memory_order_relaxed);
	token_uctx = uap->uctx;
	kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
	assert(kr == KERN_SUCCESS);

	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;
		if ((user64_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%llx expected 0x%llx\n",
			    p->p_comm, p->p_pid, (user64_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			/* Mismatch is fatal only when validation is enforced for this process. */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}
		error = sigreturn_set_state64(th_act, &mctx.mc64);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn set_state64 error %d\n",
			    p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;
		if ((user32_addr_t)uap->token != token) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn token mismatch: received 0x%x expected 0x%x\n",
			    p->p_comm, p->p_pid, (user32_addr_t)uap->token, token);
#endif /* DEVELOPMENT || DEBUG */
			if (sigreturn_validation != PS_SIGRETURN_VALIDATION_DISABLED) {
				return EINVAL;
			}
		}
		error = sigreturn_set_state32(th_act, &mctx.mc32);
		if (error != 0) {
#if DEVELOPMENT || DEBUG
			printf("process %s[%d] sigreturn sigreturn_set_state32 error %d\n",
			    p->p_comm, p->p_pid, error);
#endif /* DEVELOPMENT || DEBUG */
			return error;
		}
	}

	/* Register state carries the real result; EJUSTRETURN skips the
	 * normal syscall return-value path. */
	return (EJUSTRETURN);
}
static void fasttrap_return_common(proc_t *p, arm_saved_state_t *regs, user_addr_t pc, user_addr_t new_pc) { pid_t pid = p->p_pid; fasttrap_tracepoint_t *tp; fasttrap_bucket_t *bucket; fasttrap_id_t *id; lck_mtx_t *pid_mtx; int retire_tp = 1; pid_mtx = &cpu_core[CPU->cpu_id].cpuc_pid_lock; lck_mtx_lock(pid_mtx); bucket = &fasttrap_tpoints.fth_table[FASTTRAP_TPOINTS_INDEX(pid, pc)]; for (tp = bucket->ftb_data; tp != NULL; tp = tp->ftt_next) { if (pid == tp->ftt_pid && pc == tp->ftt_pc && tp->ftt_proc->ftpc_acount != 0) break; } /* * Don't sweat it if we can't find the tracepoint again; unlike * when we're in fasttrap_pid_probe(), finding the tracepoint here * is not essential to the correct execution of the process. */ if (tp == NULL) { lck_mtx_unlock(pid_mtx); return; } for (id = tp->ftt_retids; id != NULL; id = id->fti_next) { fasttrap_probe_t *probe = id->fti_probe; /* * If there's a branch that could act as a return site, we * need to trace it, and check here if the program counter is * external to the function. 
*/ if (tp->ftt_type != FASTTRAP_T_LDM_PC && tp->ftt_type != FASTTRAP_T_POP_PC && new_pc - probe->ftp_faddr < probe->ftp_fsize) continue; if (probe->ftp_prov->ftp_provider_type == DTFTP_PROVIDER_ONESHOT) { uint8_t already_triggered = atomic_or_8(&probe->ftp_triggered, 1); if (already_triggered) { continue; } } /* * If we have at least one probe associated that * is not a oneshot probe, don't remove the * tracepoint */ else { retire_tp = 0; } #ifndef CONFIG_EMBEDDED if (ISSET(current_proc()->p_lflag, P_LNOATTACH)) { dtrace_probe(dtrace_probeid_error, 0 /* state */, id->fti_probe->ftp_id, 1 /* ndx */, -1 /* offset */, DTRACEFLT_UPRIV); #else if (FALSE) { #endif } else { dtrace_probe(id->fti_probe->ftp_id, pc - id->fti_probe->ftp_faddr, regs->r[0], 0, 0, 0); } } if (retire_tp) { fasttrap_tracepoint_retire(p, tp); } lck_mtx_unlock(pid_mtx); } static void fasttrap_sigsegv(proc_t *p, uthread_t t, user_addr_t addr, arm_saved_state_t *regs) { /* TODO: This function isn't implemented yet. In debug mode, panic the system to * find out why we're hitting this point. In other modes, kill the process. */ #if DEBUG #pragma unused(p,t,addr,arm_saved_state) panic("fasttrap: sigsegv not yet implemented"); #else #pragma unused(p,t,addr) /* Kill the process */ regs->pc = 0; #endif #if 0 proc_lock(p); /* Set fault address and mark signal */ t->uu_code = addr; t->uu_siglist |= sigmask(SIGSEGV); /* * XXX These two line may be redundant; if not, then we need * XXX to potentially set the data address in the machine * XXX specific thread state structure to indicate the address. */ t->uu_exception = KERN_INVALID_ADDRESS; /* SIGSEGV */ t->uu_subcode = 0; /* XXX pad */ proc_unlock(p); /* raise signal */ signal_setast(t->uu_context.vc_thread); #endif }