/*
 * Send an interrupt to process.
 *
 * Build a signal frame (saved thread state, ucontext and siginfo) on the
 * user stack, copy it out, and redirect the thread to the user-space
 * signal trampoline so the handler runs on return to user mode.
 *
 * The proc lock is held on entry (see the proc_unlock() below and the
 * "signal lock held" note at bad2); it is dropped around siginfo
 * construction and the copyout, and re-taken before returning.  If
 * anything fails, handler setup is abandoned and the process is killed
 * with an unblockable SIGILL (labels bad / bad2).
 */
void
sendsig(struct proc * p, user_addr_t catcher, int sig, int mask, __unused uint32_t code)
{
	/* Integer thread state, in the flavor matching the process ABI. */
	union {
		struct ts32 {
			arm_thread_state_t ss;
		} ts32;
#if defined(__arm64__)
		struct ts64 {
			arm_thread_state64_t ss;
		} ts64;
#endif
	} ts;
	/* The signal frame that will be copied out to the user stack. */
	union {
		struct user_sigframe32 uf32;
#if defined(__arm64__)
		struct user_sigframe64 uf64;
#endif
	} user_frame;

	user_siginfo_t sinfo;
	user_addr_t sp = 0, trampact;	/* new user SP; trampoline address */
	struct sigacts *ps = p->p_sigacts;
	int oonstack, infostyle;
	thread_t th_act;
	struct uthread *ut;
	user_size_t stack_size = 0;
	user_addr_t p_uctx, token_uctx;	/* user addresses of the ucontext */
	kern_return_t kr;

	th_act = current_thread();
	ut = get_bsdthread_info(th_act);

	bzero(&ts, sizeof(ts));
	bzero(&user_frame, sizeof(user_frame));

	/* SA_SIGINFO handlers get the three-argument (siginfo) flavor. */
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;
	else
		infostyle = UC_TRAD;

	trampact = ps->ps_trampact[sig];
	oonstack = ps->ps_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * Get sundry thread state: the register file goes into ts, the
	 * machine context is written directly into the frame being built.
	 */
	if (proc_is64bit_data(p)) {
#ifdef __arm64__
		if (sendsig_get_state64(th_act, &ts.ts64.ss, &user_frame.uf64.mctx) != 0) {
			goto bad2;
		}
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		if (sendsig_get_state32(th_act, &ts.ts32.ss, &user_frame.uf32.mctx) != 0) {
			goto bad2;
		}
	}

	/*
	 * Figure out where our new stack lives: use the alternate signal
	 * stack if one is configured for this signal and we are not
	 * already on it; otherwise carve the frame out of the thread's
	 * current stack.
	 */
	if ((ps->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (ps->ps_sigonstack & sigmask(sig))) {
		/* Alternate stack: frame grows down from ss_sp + ss_size. */
		sp = ps->ps_sigstk.ss_sp;
		sp += ps->ps_sigstk.ss_size;
		stack_size = ps->ps_sigstk.ss_size;
		ps->ps_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		/*
		 * Get stack pointer, and allocate enough space
		 * for signal handler data.
		 */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sp = CAST_USER_ADDR_T(ts.ts64.ss.sp);
			sp = (sp - sizeof(user_frame.uf64) - C_64_REDZONE_LEN) & ~0xf; /* Make sure to align to 16 bytes and respect red zone */
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sp = CAST_USER_ADDR_T(ts.ts32.ss.sp);
			sp -= sizeof(user_frame.uf32);
#if defined(__arm__) && (__BIGGEST_ALIGNMENT__ > 4)
			sp &= ~0xf; /* Make sure to align to 16 bytes for armv7k */
#endif
		}
	}

	/* Drop the proc lock across siginfo construction and copyout. */
	proc_unlock(p);

	/*
	 * Fill in ucontext (points to mcontext, i.e. thread states).
	 * The mctx argument is the USER address of the mctx field inside
	 * the frame that will be copied out at sp.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sendsig_fill_uctx64(&user_frame.uf64.uctx, oonstack, mask, sp, (user64_size_t)stack_size,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->mctx);
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sendsig_fill_uctx32(&user_frame.uf32.uctx, oonstack, mask, sp, (user32_size_t)stack_size,
		    (user32_addr_t)&((struct user_sigframe32*)sp)->mctx);
	}

	/*
	 * Setup siginfo.
	 */
	bzero((caddr_t) & sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;

	/* Default si_addr is the faulting PC; pad[0] carries the SP. */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		sinfo.si_addr = ts.ts64.ss.pc;
		sinfo.pad[0] = ts.ts64.ss.sp;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		sinfo.si_addr = ts.ts32.ss.pc;
		sinfo.pad[0] = ts.ts32.ss.sp;
	}

	switch (sig) {
	case SIGILL:
#ifdef BER_XXX
		/* PowerPC-era decode of the illegal-instruction cause (not built). */
		if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT)))
			sinfo.si_code = ILL_ILLOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT)))
			sinfo.si_code = ILL_PRVOPC;
		else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT)))
			sinfo.si_code = ILL_ILLTRP;
		else
			sinfo.si_code = ILL_NOOP;
#else
		sinfo.si_code = ILL_ILLTRP;
#endif
		break;

	case SIGFPE:
		break;

	case SIGBUS:
		/* Report the fault address from the exception state (FAR). */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}
		sinfo.si_code = BUS_ADRALN;
		break;

	case SIGSEGV:
		/* Report the fault address from the exception state (FAR). */
		if (proc_is64bit_data(p)) {
#if defined(__arm64__)
			sinfo.si_addr = user_frame.uf64.mctx.es.far;
#else
			panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
		} else {
			sinfo.si_addr = user_frame.uf32.mctx.es.far;
		}
#ifdef BER_XXX
		/* First check in srr1 and then in dsisr */
		if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT)))
			sinfo.si_code = SEGV_ACCERR;
		else
			sinfo.si_code = SEGV_MAPERR;
#else
		sinfo.si_code = SEGV_ACCERR;
#endif
		break;

	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			/* Refine the generic CLD_EXITED into exited/killed/dumped. */
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 * The top byte is supplemented from p_xhighbits.
		 */
		sinfo.si_status = (WEXITSTATUS(status_and_exitcode) & 0x00FFFFFF) |
		    (((uint32_t)(p->p_xhighbits) << 24) & 0xFF000000);
		p->p_xhighbits = 0;
		break;
	}
	}

#if CONFIG_DTRACE
	sendsig_do_dtrace(ut, &sinfo, sig, catcher);
#endif /* CONFIG_DTRACE */

	/*
	 * Copy signal-handling frame out to user space, set thread state.
	 */
	if (proc_is64bit_data(p)) {
#if defined(__arm64__)
		user64_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx64().  We fill in the sinfo now.
		 */
		siginfo_user_to_user64(&sinfo, &user_frame.uf64.sinfo);
		p_uctx = (user_addr_t)&((struct user_sigframe64*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn: the
		 * user-visible ucontext pointer XORed with the process's
		 * ps_sigreturn_token secret (presumably re-checked by the
		 * sigreturn path — not visible here).
		 */
		token_uctx = p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user64_addr_t)token_uctx ^ (user64_addr_t)ps->ps_sigreturn_token;

		if (copyout(&user_frame.uf64, sp, sizeof(user_frame.uf64)) != 0) {
			goto bad;
		}

		/* Redirect the thread into the trampoline with handler args. */
		if (sendsig_set_thread_state64(&ts.ts64.ss,
		    catcher, infostyle, sig,
		    (user64_addr_t)&((struct user_sigframe64*)sp)->sinfo,
		    (user64_addr_t)p_uctx, token, trampact, sp, th_act) != KERN_SUCCESS)
			goto bad;
#else
		panic("Shouldn't have 64-bit thread states on a 32-bit kernel.");
#endif
	} else {
		user32_addr_t token;

		/*
		 * mctx filled in when we get state.  uctx filled in by
		 * sendsig_fill_uctx32().  We fill in the sinfo, *pointer*
		 * to uctx and token now.
		 */
		siginfo_user_to_user32(&sinfo, &user_frame.uf32.sinfo);
		p_uctx = (user_addr_t)&((struct user_sigframe32*)sp)->uctx;
		/*
		 * Generate the validation token for sigreturn.  Unlike the
		 * 64-bit path, the uctx pointer and token travel inside the
		 * frame itself (puctx / token fields below).
		 */
		token_uctx = (user_addr_t)p_uctx;
		kr = machine_thread_siguctx_pointer_convert_to_user(th_act, &token_uctx);
		assert(kr == KERN_SUCCESS);
		token = (user32_addr_t)token_uctx ^ (user32_addr_t)ps->ps_sigreturn_token;

		user_frame.uf32.puctx = (user32_addr_t)p_uctx;
		user_frame.uf32.token = token;

		if (copyout(&user_frame.uf32, sp, sizeof(user_frame.uf32)) != 0) {
			goto bad;
		}

		/* Redirect the thread into the trampoline with handler args. */
		if (sendsig_set_thread_state32(&ts.ts32.ss,
		    CAST_DOWN_EXPLICIT(user32_addr_t, catcher), infostyle, sig,
		    (user32_addr_t)&((struct user_sigframe32*)sp)->sinfo,
		    CAST_DOWN_EXPLICIT(user32_addr_t, trampact),
		    CAST_DOWN_EXPLICIT(user32_addr_t, sp), th_act) != KERN_SUCCESS)
			goto bad;
	}

	/* Success: re-take the proc lock for the caller and return. */
	proc_lock(p);
	return;

bad:
	proc_lock(p);
bad2:
	/*
	 * Delivery failed: reset SIGILL to the default action, make it
	 * uncatchable/unignorable/unmaskable, then post it to kill the
	 * process.
	 */
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/* sendsig is called with signal lock held */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
}
/*
 * Routine: exception_deliver
 * Purpose:
 *	Make an upcall to the exception server provided.
 * Conditions:
 *	Nothing locked and no resources held.
 *	Called from an exception context, so
 *	thread_exception_return and thread_kdb_return
 *	are possible.
 * Returns:
 *	KERN_SUCCESS if the exception was handled
 */
kern_return_t
exception_deliver(
	thread_t		thread,
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t	codeCnt,
	struct exception_action *excp,
	lck_mtx_t		*mutex)
{
	ipc_port_t exc_port;
	/* Narrowed copies of the 64-bit codes for the legacy interface. */
	exception_data_type_t small_code[EXCEPTION_CODE_MAX];
	int code64;	/* nonzero: server wants MACH_EXCEPTION_CODES (64-bit) */
	int behavior;
	int flavor;
	kern_return_t kr;

	/*
	 * Save work if we are terminating.
	 * Just go back to our AST handler.
	 */
	if (!thread->active)
		return KERN_SUCCESS;

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);

	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}

	ip_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	/*
	 * Take a port reference and a send right for the upcall.
	 * NOTE(review): no matching release here — presumably both are
	 * consumed when the right is copied into the upcall message;
	 * confirm against the MIG stubs.
	 */
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

	/* Strip the codes-width flag from the behavior before dispatch. */
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_CODES;

	if (!code64) {
		/*
		 * Legacy (32-bit code) server: truncate the codes.
		 * NOTE(review): only the first two entries are narrowed —
		 * assumes codeCnt <= 2; confirm against callers.
		 */
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	switch (behavior) {
	case EXCEPTION_STATE: {
		/* State-only upcall: pass thread state, no thread/task ports. */
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor,
		    (thread_state_t)state,
		    &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state(exc_port,
				    exception,
				    code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			}
			/* The server may have modified the state; install it. */
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(thread, flavor,
				    (thread_state_t)state,
				    state_cnt);
		}

		return kr;
	}

	case EXCEPTION_DEFAULT:
		/* Identity-only upcall: thread and task ports, no state. */
		c_thr_exc_raise++;
		if (code64) {
			kr = mach_exception_raise(exc_port,
			    retrieve_thread_self_fast(thread),
			    retrieve_task_self_fast(thread->task),
			    exception,
			    code,
			    codeCnt);
		} else {
			kr = exception_raise(exc_port,
			    retrieve_thread_self_fast(thread),
			    retrieve_task_self_fast(thread->task),
			    exception,
			    small_code,
			    codeCnt);
		}

		return kr;

	case EXCEPTION_STATE_IDENTITY: {
		/* Full upcall: thread/task ports plus thread state. */
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor,
		    (thread_state_t)state,
		    &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state_identity(
				    exc_port,
				    retrieve_thread_self_fast(thread),
				    retrieve_task_self_fast(thread->task),
				    exception,
				    code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port,
				    retrieve_thread_self_fast(thread),
				    retrieve_task_self_fast(thread->task),
				    exception,
				    small_code,
				    codeCnt,
				    &flavor,
				    state, state_cnt,
				    state, &state_cnt);
			}
			/* The server may have modified the state; install it. */
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(thread, flavor,
				    (thread_state_t)state,
				    state_cnt);
		}

		return kr;
	}

	default:
		panic ("bad exception behavior!");
		return KERN_FAILURE;
	}/* switch */
}