/*
 * Capture the 64-bit machine context of 'th_act' into 'mcp' for signal
 * delivery.  'ts' is caller-provided scratch for the raw thread state.
 *
 * Returns 0 on success, EINVAL if any state flavor cannot be fetched or
 * converted to its user-space representation.
 */
static int
sendsig_get_state64(thread_t th_act, arm_thread_state64_t *ts, mcontext64_t *mcp)
{
	mach_msg_type_number_t count;

	assert(proc_is64bit_data(current_proc()));

	/* General-purpose register state. */
	count = ARM_THREAD_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_THREAD_STATE64,
	    (thread_state_t)ts, &count) != KERN_SUCCESS) {
		return EINVAL;
	}
	mcp->ss = *ts;

	/* Rewrite the saved thread state into its user-space representation. */
	count = ARM_THREAD_STATE64_COUNT;
	if (machine_thread_state_convert_to_user(th_act, ARM_THREAD_STATE64,
	    (thread_state_t)&mcp->ss, &count) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* Exception state. */
	count = ARM_EXCEPTION_STATE64_COUNT;
	if (thread_getstatus(th_act, ARM_EXCEPTION_STATE64,
	    (thread_state_t)&mcp->es, &count) != KERN_SUCCESS) {
		return EINVAL;
	}

	/* NEON/FP state, fetched already converted for user consumption. */
	count = ARM_NEON_STATE64_COUNT;
	if (thread_getstatus_to_user(th_act, ARM_NEON_STATE64,
	    (thread_state_t)&mcp->ns, &count) != KERN_SUCCESS) {
		return EINVAL;
	}

	return 0;
}
/*
 * Retransmission timer for an ESTABLISHED connection: if unacknowledged data
 * is outstanding and the sending thread is blocked waiting for an ACK, either
 * signal a retry (backing off exponentially) or, once the backed-off timeout
 * exceeds TCP_ACK_MAX_TIMEOUT, signal a final timeout.
 */
void handle_established(socket_internal_t *current_socket)
{
	msg_t notification;
	double timeout = current_socket->socket_values.tcp_control.rto;
	uint8_t retry;

	/* Never time out faster than one second. */
	if (timeout < SECOND) {
		timeout = SECOND;
	}

	/* Nothing to do unless data is outstanding... */
	if (current_socket->socket_values.tcp_control.send_nxt <=
	    current_socket->socket_values.tcp_control.send_una) {
		return;
	}
	/* ...and the sender thread is actually parked waiting for an ACK. */
	if (thread_getstatus(current_socket->send_pid) != STATUS_RECEIVE_BLOCKED) {
		return;
	}

	/* Exponential backoff: double the timeout once per retry so far. */
	for (retry = 0; retry < current_socket->socket_values.tcp_control.no_of_retries; retry++) {
		timeout *= 2;
	}

	timex_t now;
	vtimer_now(&now);

	if (timeout > TCP_ACK_MAX_TIMEOUT) {
		/* Backed off past the cap: give up and report a timeout. */
		net_msg_send(&notification, current_socket->send_pid, 0, TCP_TIMEOUT);
	}
	else if (timex_uint64(timex_sub(now,
	             current_socket->socket_values.tcp_control.last_packet_time)) > timeout) {
		/* Timer expired: record the attempt and ask the sender to retry. */
		current_socket->socket_values.tcp_control.no_of_retries++;
		net_msg_send(&notification, current_socket->send_pid, 0, TCP_RETRY);
	}
}
/*
 * Retransmission timer for an ESTABLISHED connection: if unacknowledged data
 * is outstanding and the sending thread is blocked waiting for an ACK, either
 * signal a retry (backing off exponentially) or, once the backed-off timeout
 * exceeds TCP_ACK_MAX_TIMEOUT, signal a final timeout.
 *
 * Fix: removed several blocks of commented-out debug printf code (dead code).
 * Logic is otherwise unchanged.
 */
void handle_established(socket_internal_t *current_socket)
{
	msg_t send;
	double current_timeout = current_socket->socket_values.tcp_control.rto;
	uint8_t i;

	/* Never time out faster than one second. */
	if (current_timeout < SECOND) {
		current_timeout = SECOND;
	}

	if ((current_socket->socket_values.tcp_control.send_nxt >
	     current_socket->socket_values.tcp_control.send_una) &&
	    (thread_getstatus(current_socket->send_pid) == STATUS_RECEIVE_BLOCKED)) {
		/* Exponential backoff: double the timeout once per retry so far. */
		for (i = 0; i < current_socket->socket_values.tcp_control.no_of_retries; i++) {
			current_timeout *= 2;
		}

		if (current_timeout > TCP_ACK_MAX_TIMEOUT) {
			/* Backed off past the cap: give up and report a timeout. */
			net_msg_send(&send, current_socket->send_pid, 0, TCP_TIMEOUT);
		}
		/* NOTE(review): comparing only the .microseconds field ignores the
		 * seconds part of the difference; timex_uint64() would compare the
		 * full value — confirm against the timex API in use. */
		else if (timex_sub(vtimer_now(), current_socket->socket_values.tcp_control.last_packet_time).microseconds
		         > current_timeout) {
			current_socket->socket_values.tcp_control.no_of_retries++;
			net_msg_send(&send, current_socket->send_pid, 0, TCP_RETRY);
		}
	}
}
/*
 * SYN retransmission timer: while the connecting thread is blocked waiting
 * for the handshake, retransmit after TCP_SYN_INITIAL_TIMEOUT, then back off
 * linearly per retry; after TCP_MAX_SYN_RETRIES attempts report a timeout.
 */
void handle_synchro_timeout(socket_internal_t *current_socket)
{
	msg_t reply;
	timex_t now;

	/* Only act while the connecting thread is parked waiting on the handshake. */
	if (thread_getstatus(current_socket->recv_pid) != STATUS_RECEIVE_BLOCKED) {
		return;
	}

	vtimer_now(&now);

	if (current_socket->socket_values.tcp_control.no_of_retries == 0) {
		/* First retransmission: fire after the initial SYN timeout. */
		if (timex_uint64(timex_sub(now, current_socket->socket_values.tcp_control.last_packet_time))
		    > TCP_SYN_INITIAL_TIMEOUT) {
			current_socket->socket_values.tcp_control.no_of_retries++;
			net_msg_send(&reply, current_socket->recv_pid, 0, TCP_RETRY);
		}
	}
	else if (timex_uint64(timex_sub(now, current_socket->socket_values.tcp_control.last_packet_time))
	         > (current_socket->socket_values.tcp_control.no_of_retries * TCP_SYN_TIMEOUT
	            + TCP_SYN_INITIAL_TIMEOUT)) {
		/* Subsequent retransmissions: deadline grows by TCP_SYN_TIMEOUT per retry. */
		current_socket->socket_values.tcp_control.no_of_retries++;
		if (current_socket->socket_values.tcp_control.no_of_retries > TCP_MAX_SYN_RETRIES) {
			net_msg_send(&reply, current_socket->recv_pid, 0, TCP_TIMEOUT);
		}
		else {
			net_msg_send(&reply, current_socket->recv_pid, 0, TCP_RETRY);
		}
	}
}
/*
 * SYN retransmission timer: while the connecting thread is blocked waiting
 * for the handshake, retransmit after TCP_SYN_INITIAL_TIMEOUT, then back off
 * linearly per retry; after TCP_MAX_SYN_RETRIES attempts report a timeout.
 *
 * Fix: removed the commented-out debug printf lines (dead code). Logic is
 * otherwise unchanged.
 */
void handle_synchro_timeout(socket_internal_t *current_socket)
{
	msg_t send;

	if (thread_getstatus(current_socket->recv_pid) == STATUS_RECEIVE_BLOCKED) {
		/* NOTE(review): comparing only the .microseconds field ignores the
		 * seconds part of the time difference; timex_uint64() would compare
		 * the full value — confirm against the timex API in use. */
		if ((current_socket->socket_values.tcp_control.no_of_retries == 0) &&
		    (timex_sub(vtimer_now(), current_socket->socket_values.tcp_control.last_packet_time).microseconds
		     > TCP_SYN_INITIAL_TIMEOUT)) {
			/* First retransmission after the initial SYN timeout. */
			current_socket->socket_values.tcp_control.no_of_retries++;
			net_msg_send(&send, current_socket->recv_pid, 0, TCP_RETRY);
		}
		else if ((current_socket->socket_values.tcp_control.no_of_retries > 0) &&
		         (timex_sub(vtimer_now(), current_socket->socket_values.tcp_control.last_packet_time).microseconds
		          > (current_socket->socket_values.tcp_control.no_of_retries * TCP_SYN_TIMEOUT
		             + TCP_SYN_INITIAL_TIMEOUT))) {
			current_socket->socket_values.tcp_control.no_of_retries++;
			if (current_socket->socket_values.tcp_control.no_of_retries > TCP_MAX_SYN_RETRIES) {
				/* Too many attempts: give up and report a timeout. */
				net_msg_send(&send, current_socket->recv_pid, 0, TCP_TIMEOUT);
			}
			else {
				net_msg_send(&send, current_socket->recv_pid, 0, TCP_RETRY);
			}
		}
	}
}
/*
 * Copy an incoming TCP segment's payload into the socket's input buffer,
 * shrink the advertised receive window accordingly, and wake the receiving
 * thread if it is blocked.  Returns the number of payload bytes accepted
 * (i.e. the bytes that should be acknowledged).
 *
 * BUG FIX: in the window-overflow branch the old code zeroed rcv_wnd and
 * only then added it to tcp_input_buffer_end, so the buffer end offset was
 * advanced by 0 instead of by the number of bytes copied.  The end offset
 * is now advanced by acknowledged_bytes before the window is closed.
 */
uint8_t handle_payload(ipv6_hdr_t *ipv6_header, tcp_hdr_t *tcp_header, socket_internal_t *tcp_socket, uint8_t *payload)
{
	msg_t m_send_tcp, m_recv_tcp;
	/* NOTE(review): ipv6_header->length is presumably wider than uint8_t;
	 * this truncates payloads longer than 255 bytes — confirm upstream
	 * guarantees a small MSS. */
	uint8_t tcp_payload_len = ipv6_header->length - TCP_HDR_LEN;
	uint8_t acknowledged_bytes = 0;

	if (tcp_payload_len > tcp_socket->socket_values.tcp_control.rcv_wnd) {
		/* Payload exceeds the advertised window: accept only as much as
		 * the window allows, then close the window. */
		mutex_lock(&tcp_socket->tcp_buffer_mutex);
		/* NOTE(review): both branches copy to the start of tcp_input_buffer
		 * while also tracking tcp_input_buffer_end — confirm intended. */
		memcpy(tcp_socket->tcp_input_buffer, payload,
		       tcp_socket->socket_values.tcp_control.rcv_wnd);
		acknowledged_bytes = tcp_socket->socket_values.tcp_control.rcv_wnd;
		/* Advance the end offset by the bytes actually copied BEFORE
		 * zeroing rcv_wnd (the old order made this a no-op). */
		tcp_socket->tcp_input_buffer_end = tcp_socket->tcp_input_buffer_end +
		                                   acknowledged_bytes;
		tcp_socket->socket_values.tcp_control.rcv_wnd = 0;
		mutex_unlock(&tcp_socket->tcp_buffer_mutex);
	}
	else {
		/* Whole payload fits: accept it all and shrink the window. */
		mutex_lock(&tcp_socket->tcp_buffer_mutex);
		memcpy(tcp_socket->tcp_input_buffer, payload, tcp_payload_len);
		tcp_socket->socket_values.tcp_control.rcv_wnd =
		    tcp_socket->socket_values.tcp_control.rcv_wnd - tcp_payload_len;
		acknowledged_bytes = tcp_payload_len;
		tcp_socket->tcp_input_buffer_end = tcp_socket->tcp_input_buffer_end +
		                                   tcp_payload_len;
		mutex_unlock(&tcp_socket->tcp_buffer_mutex);
	}

	if (thread_getstatus(tcp_socket->recv_pid) == STATUS_RECEIVE_BLOCKED) {
		/* Wake the blocked reader and wait for it to collect the data. */
		net_msg_send_recv(&m_send_tcp, &m_recv_tcp, tcp_socket->recv_pid, UNDEFINED);
	}

	return acknowledged_bytes;
}
void thread::join() { if (this->get_id() == this_thread::get_id()) { throw system_error(make_error_code(errc::resource_deadlock_would_occur), "Joining this leads to a deadlock."); } if (joinable()) { auto status = thread_getstatus(m_handle); if (status != STATUS_NOT_FOUND && status != STATUS_STOPPED) { m_data->joining_thread = sched_active_pid; thread_sleep(); } m_handle = thread_uninitialized; } else { throw system_error(make_error_code(errc::invalid_argument), "Can not join an unjoinable thread."); } // missing: no_such_process system error }
/*
 * Routine: exception_deliver
 * Purpose:
 *	Make an upcall to the exception server provided.
 * Conditions:
 *	Nothing locked and no resources held.
 *	Called from an exception context, so
 *	thread_exception_return and thread_kdb_return
 *	are possible.
 * Returns:
 *	KERN_SUCCESS if the exception was handled
 */
kern_return_t
exception_deliver(
	thread_t		thread,
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t  codeCnt,
	struct exception_action *excp,
	lck_mtx_t			*mutex)
{
	ipc_port_t		exc_port;
	exception_data_type_t	small_code[EXCEPTION_CODE_MAX];
	int			code64;
	int			behavior;
	int			flavor;
	kern_return_t		kr;

	/*
	 * Save work if we are terminating.
	 * Just go back to our AST handler.
	 */
	if (!thread->active)
		return KERN_SUCCESS;

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	ip_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	/* Take a port reference and a send right while the port lock is held. */
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

	/* The MACH_EXCEPTION_CODES bit selects 64-bit exception codes; strip it
	 * so the switch below sees only the base behavior value. */
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_CODES;

	if (!code64) {
		/* Narrow the two 64-bit codes for the legacy 32-bit interfaces. */
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	switch (behavior) {
	case EXCEPTION_STATE: {
		/* State-only upcall: pass thread state, write back what the
		 * handler returns. */
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor, (thread_state_t)state, &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state(exc_port, exception, code, codeCnt, &flavor, state, state_cnt, state, &state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception, small_code, codeCnt, &flavor, state, state_cnt, state, &state_cnt);
			}
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(thread, flavor, (thread_state_t)state, state_cnt);
		}
		return kr;
	}

	case EXCEPTION_DEFAULT:
		/* Identity-only upcall: pass thread and task ports, no state. */
		c_thr_exc_raise++;
		if (code64) {
			kr = mach_exception_raise(exc_port, retrieve_thread_self_fast(thread), retrieve_task_self_fast(thread->task), exception, code, codeCnt);
		} else {
			kr = exception_raise(exc_port, retrieve_thread_self_fast(thread), retrieve_task_self_fast(thread->task), exception, small_code, codeCnt);
		}
		return kr;

	case EXCEPTION_STATE_IDENTITY: {
		/* Both identity and state are passed; state is written back on
		 * a successful reply, as in EXCEPTION_STATE. */
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor, (thread_state_t)state, &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state_identity(
					exc_port,
					retrieve_thread_self_fast(thread),
					retrieve_task_self_fast(thread->task),
					exception, code, codeCnt, &flavor, state, state_cnt, state, &state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port, retrieve_thread_self_fast(thread), retrieve_task_self_fast(thread->task), exception, small_code, codeCnt, &flavor, state, state_cnt, state, &state_cnt);
			}
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(thread, flavor, (thread_state_t)state, state_cnt);
		}
		return kr;
	}

	default:
		panic ("bad exception behavior!");
		return KERN_FAILURE;
	}/* switch */
}
/*
 * Deliver signal 'sig' to the current thread of process 'p': capture the
 * thread's machine context, build a ucontext/siginfo frame on the user
 * stack (or the alternate signal stack), and redirect the thread to the
 * user-space signal trampoline at ps_trampact[sig].
 *
 * Called with the proc lock held; the lock is dropped for the duration and
 * re-taken before returning.  On any failure the signal is converted into
 * a default-action SIGILL ('bad:' path).
 */
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
	user_addr_t ua_sp;
	user_addr_t ua_sip;		/* user address of siginfo */
	user_addr_t trampact;		/* trampoline entry point */
	user_addr_t ua_uctxp;		/* user address of ucontext */
	user_addr_t ua_mctxp;		/* user address of mcontext */
	user_siginfo_t sinfo32;
	struct uthread *ut;
	struct mcontext mctx32;
	struct user_ucontext32 uctx32;
	struct sigacts *ps = p->p_sigacts;
	void *state;
	arm_thread_state_t *tstate32;
	mach_msg_type_number_t state_count;
	int stack_size = 0;
	int infostyle = UC_TRAD;
	int oonstack, flavor, error;

	proc_unlock(p);
	thread_t thread = current_thread();
	ut = get_bsdthread_info(thread);

	/*
	 * Set up thread state info.
	 */
	flavor = ARM_THREAD_STATE;
	state = (void *)&mctx32.ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
		goto bad;

	flavor = ARM_EXCEPTION_STATE;
	state = (void *)&mctx32.es;
	state_count = ARM_EXCEPTION_STATE_COUNT;
	if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
		goto bad;

	flavor = ARM_VFP_STATE;
	state = (void *)&mctx32.fs;
	state_count = ARM_VFP_STATE_COUNT;
	if (thread_getstatus(thread, flavor, (thread_state_t)state, &state_count) != KERN_SUCCESS)
		goto bad;

	tstate32 = &mctx32.ss;

	/*
	 * Set the signal style: SA_SIGINFO handlers get the flavored calling
	 * convention, everything else the traditional one.
	 */
	if (p->p_sigacts->ps_siginfo & sigmask(sig))
		infostyle = UC_FLAVOR;

	/*
	 * Get the signal disposition.
	 */
	trampact = ps->ps_trampact[sig];

	/*
	 * Figure out where our new stack lives: the alternate signal stack if
	 * one is configured, requested for this signal, and not already active;
	 * otherwise the thread's current user sp.
	 */
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
		ua_sp = ut->uu_sigstk.ss_sp;
		ua_sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		ua_sp = tstate32->sp;
	}

	/*
	 * Set up the stack: mcontext, then ucontext, then siginfo, growing down.
	 */
	ua_sp -= UC_FLAVOR_SIZE;
	ua_mctxp = ua_sp;
	ua_sp -= sizeof(struct user_ucontext32);
	ua_uctxp = ua_sp;
	ua_sp -= sizeof(siginfo_t);
	ua_sip = ua_sp;

	/*
	 * Align the stack pointer.
	 */
	ua_sp = TRUNC_DOWN32(ua_sp, C_32_STK_ALIGN);

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	uctx32.uc_onstack = oonstack;
	uctx32.uc_sigmask = mask;
	uctx32.uc_stack.ss_sp = ua_sp;
	uctx32.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx32.uc_stack.ss_flags |= SS_ONSTACK;
	uctx32.uc_link = 0;
	uctx32.uc_mcsize = UC_FLAVOR_SIZE;
	uctx32.uc_mcontext = ua_mctxp;

	/*
	 * init siginfo
	 */
	bzero((caddr_t)&sinfo32, sizeof(user_siginfo_t));
	sinfo32.si_signo = sig;
	sinfo32.pad[0] = tstate32->sp;
	/* NOTE(review): si_addr is filled from lr here, not the faulting pc —
	 * confirm this is intended for this port. */
	sinfo32.si_addr = tstate32->lr;

	switch (sig) {
	case SIGILL:
		sinfo32.si_code = ILL_NOOP;
		break;
	case SIGFPE:
		sinfo32.si_code = FPE_NOOP;
		break;
	case SIGBUS:
		sinfo32.si_code = BUS_ADRALN;
		break;
	case SIGSEGV:
		sinfo32.si_code = SEGV_ACCERR;
		break;
	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo32.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo32.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo32.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo32.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo32.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo32.si_code = CLD_DUMPED;
					/* NOTE(review): passing the same value for both
					 * W_EXITCODE arguments looks garbled; upstream uses
					 * WEXITSTATUS()/WTERMSIG() here — confirm. */
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo32.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo32.si_status = WEXITSTATUS(status_and_exitcode);
		break;
	}
	}

	/*
	 * Copy out context info.
	 */
	if (copyout((caddr_t)&uctx32, ua_uctxp, sizeof(struct user_ucontext32)) != KERN_SUCCESS)
		goto bad;
	if (copyout((caddr_t)&sinfo32, ua_sip, sizeof(user_siginfo_t)) != KERN_SUCCESS)
		goto bad;
	if (copyout((caddr_t)&mctx32, ua_mctxp, sizeof(struct mcontext)) != KERN_SUCCESS)
		goto bad;
	if (copyout((caddr_t)&ua_uctxp, ua_sp, sizeof(user_addr_t)) != KERN_SUCCESS)
		goto bad;

	/*
	 * Set up registers for the trampoline (AAPCS argument registers).
	 */
	tstate32->r[0] = ua_catcher;
	tstate32->r[1] = infostyle;
	tstate32->r[2] = sig;
	tstate32->r[3] = ua_sip;
	tstate32->sp = ua_sp;

	/* NOTE(review): conventionally bit 0 SET in a code address selects Thumb,
	 * yet the T-bit is set here on the CLEAR branch — confirm this inversion
	 * is intentional for this port. */
	if (trampact & 0x01) {
		tstate32->lr = trampact;
		tstate32->cpsr = 0x10;	/* User mode */
	} else {
		trampact &= ~0x01;
		tstate32->lr = trampact;
		tstate32->cpsr = 0x10;	/* User mode */
		tstate32->cpsr |= (1 << 5);	/* T-bit */
	}

	/*
	 * Call the trampoline.
	 */
	flavor = ARM_THREAD_STATE;
	state_count = ARM_THREAD_STATE_COUNT;
	state = (void *)tstate32;
	if ((error = thread_setstatus(thread, flavor, (thread_state_t)state, state_count)) != KERN_SUCCESS)
		panic("sendsig: thread_setstatus failed, ret = %08X\n", error);

	proc_lock(p);
	return;

bad:
	/* Couldn't build/deliver the frame: force default-action SIGILL. */
	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/*
	 * sendsig is called with signal lock held
	 */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}
/*
 * Deliver signal 'sig' to the current thread of process 'p'.
 *
 * NOTE(review): this is a bring-up stub — the unconditional 'return;' right
 * after the kprintf below makes everything past it unreachable, so no signal
 * frame is ever delivered.  The dead code is kept as the intended eventual
 * implementation.
 */
void
sendsig(struct proc *p, user_addr_t ua_catcher, int sig, int mask, __unused uint32_t code)
{
	struct mcontext mctx;
	thread_t th_act;
	struct uthread *ut;
	void *tstate;
	int flavor;
	user_addr_t p_mctx = USER_ADDR_NULL;	/* mcontext dest. */
	int infostyle = UC_TRAD;
	mach_msg_type_number_t state_count;
	user_addr_t trampact;
	int oonstack;
	struct user_ucontext32 uctx;
	user_addr_t sp;
	user_addr_t p_uctx;	/* user stack addr top copy ucontext */
	user_siginfo_t sinfo;
	user_addr_t p_sinfo;	/* user stack addr top copy siginfo */
	struct sigacts *ps = p->p_sigacts;
	int stack_size = 0;
	kern_return_t kretn;

	th_act = current_thread();
	kprintf("sendsig: Sending signal to thread %p, code %d.\n", th_act, sig);
	/* Early return: signal delivery is disabled; the rest is dead code. */
	return;
	ut = get_bsdthread_info(th_act);

	/* SA_SIGINFO handlers get the flavored calling convention. */
	if (p->p_sigacts->ps_siginfo & sigmask(sig)) {
		infostyle = UC_FLAVOR;
	}

	flavor = ARM_THREAD_STATE;
	tstate = (void *) &mctx.ss;
	state_count = ARM_THREAD_STATE_COUNT;
	if (thread_getstatus(th_act, flavor, (thread_state_t) tstate, &state_count) != KERN_SUCCESS)
		goto bad;

	trampact = ps->ps_trampact[sig];
	oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK;

	/*
	 * figure out where our new stack lives
	 */
	if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) {
		sp = ut->uu_sigstk.ss_sp;
		sp += ut->uu_sigstk.ss_size;
		stack_size = ut->uu_sigstk.ss_size;
		ut->uu_sigstk.ss_flags |= SA_ONSTACK;
	} else {
		sp = CAST_USER_ADDR_T(mctx.ss.sp);
	}

	/*
	 * context goes first on stack
	 * NOTE(review): space is reserved with sizeof(struct ucontext) but the
	 * copyout below writes sizeof(struct user_ucontext32) — confirm the two
	 * sizes match on this configuration.
	 */
	sp -= sizeof(struct ucontext);
	p_uctx = sp;

	/*
	 * this is where siginfo goes on stack
	 */
	sp -= sizeof(user32_siginfo_t);
	p_sinfo = sp;

	/*
	 * final stack pointer
	 */
	sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN + C_32_LINKAGE_LEN, C_32_STK_ALIGN);

	uctx.uc_mcsize = (size_t) ((ARM_THREAD_STATE_COUNT) * sizeof(int));
	uctx.uc_onstack = oonstack;
	uctx.uc_sigmask = mask;
	uctx.uc_stack.ss_sp = sp;
	uctx.uc_stack.ss_size = stack_size;
	if (oonstack)
		uctx.uc_stack.ss_flags |= SS_ONSTACK;
	uctx.uc_link = 0;

	/*
	 * setup siginfo
	 */
	bzero((caddr_t) & sinfo, sizeof(sinfo));
	sinfo.si_signo = sig;
	sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.pc);
	sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.sp);

	switch (sig) {
	case SIGILL:
		sinfo.si_code = ILL_NOOP;
		break;
	case SIGFPE:
		sinfo.si_code = FPE_NOOP;
		break;
	case SIGBUS:
		sinfo.si_code = BUS_ADRALN;
		break;
	case SIGSEGV:
		sinfo.si_code = SEGV_ACCERR;
		break;
	default:
	{
		int status_and_exitcode;

		/*
		 * All other signals need to fill out a minimum set of
		 * information for the siginfo structure passed into
		 * the signal handler, if SA_SIGINFO was specified.
		 *
		 * p->si_status actually contains both the status and
		 * the exit code; we save it off in its own variable
		 * for later breakdown.
		 */
		proc_lock(p);
		sinfo.si_pid = p->si_pid;
		p->si_pid = 0;
		status_and_exitcode = p->si_status;
		p->si_status = 0;
		sinfo.si_uid = p->si_uid;
		p->si_uid = 0;
		sinfo.si_code = p->si_code;
		p->si_code = 0;
		proc_unlock(p);
		if (sinfo.si_code == CLD_EXITED) {
			if (WIFEXITED(status_and_exitcode))
				sinfo.si_code = CLD_EXITED;
			else if (WIFSIGNALED(status_and_exitcode)) {
				if (WCOREDUMP(status_and_exitcode)) {
					sinfo.si_code = CLD_DUMPED;
					/* NOTE(review): same value passed for both W_EXITCODE
					 * arguments looks garbled; upstream uses
					 * WEXITSTATUS()/WTERMSIG() — confirm. */
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				} else {
					sinfo.si_code = CLD_KILLED;
					status_and_exitcode = W_EXITCODE(status_and_exitcode, status_and_exitcode);
				}
			}
		}
		/*
		 * The recorded status contains the exit code and the
		 * signal information, but the information to be passed
		 * in the siginfo to the handler is supposed to only
		 * contain the status, so we have to shift it out.
		 */
		sinfo.si_status = WEXITSTATUS(status_and_exitcode);
		break;
	}
	}

	if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext32)))
		goto bad;
	if (copyout(&sinfo, p_sinfo, sizeof(sinfo)))
		goto bad;

	/*
	 * set signal registers, these are probably wrong..
	 */
	{
		mctx.ss.r[0] = CAST_DOWN(uint32_t, ua_catcher);
		mctx.ss.r[1] = (uint32_t) infostyle;
		mctx.ss.r[2] = (uint32_t) sig;
		mctx.ss.r[3] = CAST_DOWN(uint32_t, p_sinfo);
		mctx.ss.r[4] = CAST_DOWN(uint32_t, p_uctx);
		mctx.ss.pc = CAST_DOWN(uint32_t, trampact);
		mctx.ss.sp = CAST_DOWN(uint32_t, sp);
		state_count = ARM_THREAD_STATE_COUNT;
		printf("sendsig: Sending signal to thread %p, code %d, new pc 0x%08x\n", th_act, sig, trampact);
		if ((kretn = thread_setstatus(th_act, ARM_THREAD_STATE, (void *) &mctx.ss, state_count)) != KERN_SUCCESS) {
			panic("sendsig: thread_setstatus failed, ret = %08X\n", kretn);
		}
	}
	proc_lock(p);
	return;

bad:
	/* Couldn't build/deliver the frame: force default-action SIGILL. */
	proc_lock(p);
	SIGACTION(p, SIGILL) = SIG_DFL;
	sig = sigmask(SIGILL);
	p->p_sigignore &= ~sig;
	p->p_sigcatch &= ~sig;
	ut->uu_sigmask &= ~sig;
	/*
	 * sendsig is called with signal lock held
	 */
	proc_unlock(p);
	psignal_locked(p, SIGILL);
	proc_lock(p);
	return;
}
void sendsig(struct proc *p, user_addr_t catcher, int sig, int mask, __unused u_long code) { kern_return_t kretn; struct mcontext mctx; user_addr_t p_mctx = USER_ADDR_NULL; /* mcontext dest. */ struct mcontext64 mctx64; user_addr_t p_mctx64 = USER_ADDR_NULL; /* mcontext dest. */ struct user_ucontext64 uctx; user_addr_t p_uctx; /* user stack addr top copy ucontext */ user_siginfo_t sinfo; user_addr_t p_sinfo; /* user stack addr top copy siginfo */ struct sigacts *ps = p->p_sigacts; int oonstack; user_addr_t sp; mach_msg_type_number_t state_count; thread_t th_act; struct uthread *ut; int infostyle = UC_TRAD; int dualcontext =0; user_addr_t trampact; int vec_used = 0; int stack_size = 0; void * tstate; int flavor; int ctx32 = 1; th_act = current_thread(); ut = get_bsdthread_info(th_act); /* * XXX We conditionalize type passed here based on SA_SIGINFO, but * XXX we always send up all the information, regardless; perhaps * XXX this should not be conditionalized? Defer making this change * XXX now, due to possible tools impact. */ if (p->p_sigacts->ps_siginfo & sigmask(sig)) { /* * If SA_SIGINFO is set, then we must provide the user * process both a siginfo_t and a context argument. We call * this "FLAVORED", as opposed to "TRADITIONAL", which doesn't * expect a context. "DUAL" is a type of "FLAVORED". */ if (is_64signalregset()) { /* * If this is a 64 bit CPU, we must include a 64 bit * context in the data we pass to user space; we may * or may not also include a 32 bit context at the * same time, for non-leaf functions. * * The user may also explicitly choose to not receive * a 32 bit context, at their option; we only allow * this to happen on 64 bit processors, for obvious * reasons. */ if (IS_64BIT_PROCESS(p) || (p->p_sigacts->ps_64regset & sigmask(sig))) { /* * For a 64 bit process, there is no 32 bit * context. */ ctx32 = 0; infostyle = UC_FLAVOR64; } else { /* * For a 32 bit process on a 64 bit CPU, we * may have 64 bit leaf functions, so we need * both contexts. 
*/ dualcontext = 1; infostyle = UC_DUAL; } } else { /* * If this is a 32 bit CPU, then we only have a 32 bit * context to contend with. */ infostyle = UC_FLAVOR; } } else { /* * If SA_SIGINFO is not set, then we have a traditional style * call which does not need additional context passed. The * default is 32 bit traditional. * * XXX The second check is redundant on PPC32; keep it anyway. */ if (is_64signalregset() || IS_64BIT_PROCESS(p)) { /* * However, if this is a 64 bit CPU, we need to change * this to 64 bit traditional, and drop the 32 bit * context. */ ctx32 = 0; infostyle = UC_TRAD64; } } proc_unlock(p); /* I need this for SIGINFO anyway */ flavor = PPC_THREAD_STATE; tstate = (void *)&mctx.ss; state_count = PPC_THREAD_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; if ((ctx32 == 0) || dualcontext) { flavor = PPC_THREAD_STATE64; tstate = (void *)&mctx64.ss; state_count = PPC_THREAD_STATE64_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 1) || dualcontext) { flavor = PPC_EXCEPTION_STATE; tstate = (void *)&mctx.es; state_count = PPC_EXCEPTION_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 0) || dualcontext) { flavor = PPC_EXCEPTION_STATE64; tstate = (void *)&mctx64.es; state_count = PPC_EXCEPTION_STATE64_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 1) || dualcontext) { flavor = PPC_FLOAT_STATE; tstate = (void *)&mctx.fs; state_count = PPC_FLOAT_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; } if ((ctx32 == 0) || dualcontext) { flavor = PPC_FLOAT_STATE; tstate = (void *)&mctx64.fs; state_count = PPC_FLOAT_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) 
!= KERN_SUCCESS) goto bad; } if (find_user_vec_curr()) { vec_used = 1; if ((ctx32 == 1) || dualcontext) { flavor = PPC_VECTOR_STATE; tstate = (void *)&mctx.vs; state_count = PPC_VECTOR_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; infostyle += 5; } if ((ctx32 == 0) || dualcontext) { flavor = PPC_VECTOR_STATE; tstate = (void *)&mctx64.vs; state_count = PPC_VECTOR_STATE_COUNT; if (thread_getstatus(th_act, flavor, (thread_state_t)tstate, &state_count) != KERN_SUCCESS) goto bad; infostyle += 5; } } trampact = ps->ps_trampact[sig]; oonstack = ut->uu_sigstk.ss_flags & SA_ONSTACK; /* figure out where our new stack lives */ if ((ut->uu_flag & UT_ALTSTACK) && !oonstack && (ps->ps_sigonstack & sigmask(sig))) { sp = ut->uu_sigstk.ss_sp; sp += ut->uu_sigstk.ss_size; stack_size = ut->uu_sigstk.ss_size; ut->uu_sigstk.ss_flags |= SA_ONSTACK; } else { if (ctx32 == 0) sp = mctx64.ss.r1; else sp = CAST_USER_ADDR_T(mctx.ss.r1); } /* put siginfo on top */ /* preserve RED ZONE area */ if (IS_64BIT_PROCESS(p)) sp = TRUNC_DOWN64(sp, C_64_REDZONE_LEN, C_64_STK_ALIGN); else sp = TRUNC_DOWN32(sp, C_32_REDZONE_LEN, C_32_STK_ALIGN); /* next are the saved registers */ if ((ctx32 == 0) || dualcontext) { sp -= sizeof(struct mcontext64); p_mctx64 = sp; } if ((ctx32 == 1) || dualcontext) { sp -= sizeof(struct mcontext); p_mctx = sp; } if (IS_64BIT_PROCESS(p)) { /* context goes first on stack */ sp -= sizeof(struct user_ucontext64); p_uctx = sp; /* this is where siginfo goes on stack */ sp -= sizeof(user_siginfo_t); p_sinfo = sp; sp = TRUNC_DOWN64(sp, C_64_PARAMSAVE_LEN+C_64_LINKAGE_LEN, C_64_STK_ALIGN); } else { /* * struct ucontext and struct ucontext64 are identical in * size and content; the only difference is the internal * pointer type for the last element, which makes no * difference for the copyout(). 
*/ /* context goes first on stack */ sp -= sizeof(struct ucontext64); p_uctx = sp; /* this is where siginfo goes on stack */ sp -= sizeof(siginfo_t); p_sinfo = sp; sp = TRUNC_DOWN32(sp, C_32_PARAMSAVE_LEN+C_32_LINKAGE_LEN, C_32_STK_ALIGN); } uctx.uc_onstack = oonstack; uctx.uc_sigmask = mask; uctx.uc_stack.ss_sp = sp; uctx.uc_stack.ss_size = stack_size; if (oonstack) uctx.uc_stack.ss_flags |= SS_ONSTACK; uctx.uc_link = 0; if (ctx32 == 0) uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE64_COUNT + PPC_THREAD_STATE64_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); else uctx.uc_mcsize = (size_t)((PPC_EXCEPTION_STATE_COUNT + PPC_THREAD_STATE_COUNT + PPC_FLOAT_STATE_COUNT) * sizeof(int)); if (vec_used) uctx.uc_mcsize += (size_t)(PPC_VECTOR_STATE_COUNT * sizeof(int)); if (ctx32 == 0) uctx.uc_mcontext64 = p_mctx64; else uctx.uc_mcontext64 = p_mctx; /* setup siginfo */ bzero((caddr_t)&sinfo, sizeof(user_siginfo_t)); sinfo.si_signo = sig; if (ctx32 == 0) { sinfo.si_addr = mctx64.ss.srr0; sinfo.pad[0] = mctx64.ss.r1; } else { sinfo.si_addr = CAST_USER_ADDR_T(mctx.ss.srr0); sinfo.pad[0] = CAST_USER_ADDR_T(mctx.ss.r1); } switch (sig) { case SIGILL: /* * If it's 64 bit and not a dual context, mctx will * contain uninitialized data, so we have to use * mctx64 here. 
*/ if(ctx32 == 0) { if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) sinfo.si_code = ILL_ILLOPC; else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) sinfo.si_code = ILL_PRVOPC; else if (mctx64.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) sinfo.si_code = ILL_ILLTRP; else sinfo.si_code = ILL_NOOP; } else { if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_ILL_INS_BIT))) sinfo.si_code = ILL_ILLOPC; else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_PRV_INS_BIT))) sinfo.si_code = ILL_PRVOPC; else if (mctx.ss.srr1 & (1 << (31 - SRR1_PRG_TRAP_BIT))) sinfo.si_code = ILL_ILLTRP; else sinfo.si_code = ILL_NOOP; } break; case SIGFPE: #define FPSCR_VX 2 #define FPSCR_OX 3 #define FPSCR_UX 4 #define FPSCR_ZX 5 #define FPSCR_XX 6 /* * If it's 64 bit and not a dual context, mctx will * contain uninitialized data, so we have to use * mctx64 here. */ if(ctx32 == 0) { if (mctx64.fs.fpscr & (1 << (31 - FPSCR_VX))) sinfo.si_code = FPE_FLTINV; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_OX))) sinfo.si_code = FPE_FLTOVF; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_UX))) sinfo.si_code = FPE_FLTUND; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_ZX))) sinfo.si_code = FPE_FLTDIV; else if (mctx64.fs.fpscr & (1 << (31 - FPSCR_XX))) sinfo.si_code = FPE_FLTRES; else sinfo.si_code = FPE_NOOP; } else { if (mctx.fs.fpscr & (1 << (31 - FPSCR_VX))) sinfo.si_code = FPE_FLTINV; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_OX))) sinfo.si_code = FPE_FLTOVF; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_UX))) sinfo.si_code = FPE_FLTUND; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_ZX))) sinfo.si_code = FPE_FLTDIV; else if (mctx.fs.fpscr & (1 << (31 - FPSCR_XX))) sinfo.si_code = FPE_FLTRES; else sinfo.si_code = FPE_NOOP; } break; case SIGBUS: if (ctx32 == 0) { sinfo.si_addr = mctx64.es.dar; } else { sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar); } /* on ppc we generate only if EXC_PPC_UNALIGNED */ sinfo.si_code = BUS_ADRALN; break; case SIGSEGV: /* * If it's 64 bit and not a dual context, mctx will * contain 
uninitialized data, so we have to use * mctx64 here. */ if (ctx32 == 0) { sinfo.si_addr = mctx64.es.dar; /* First check in srr1 and then in dsisr */ if (mctx64.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else if (mctx64.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else sinfo.si_code = SEGV_MAPERR; } else { sinfo.si_addr = CAST_USER_ADDR_T(mctx.es.dar); /* First check in srr1 and then in dsisr */ if (mctx.ss.srr1 & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else if (mctx.es.dsisr & (1 << (31 - DSISR_PROT_BIT))) sinfo.si_code = SEGV_ACCERR; else sinfo.si_code = SEGV_MAPERR; } break; default: { int status_and_exitcode; /* * All other signals need to fill out a minimum set of * information for the siginfo structure passed into * the signal handler, if SA_SIGINFO was specified. * * p->si_status actually contains both the status and * the exit code; we save it off in its own variable * for later breakdown. */ proc_lock(p); sinfo.si_pid = p->si_pid; p->si_pid = 0; status_and_exitcode = p->si_status; p->si_status = 0; sinfo.si_uid = p->si_uid; p->si_uid = 0; sinfo.si_code = p->si_code; p->si_code = 0; proc_unlock(p); if (sinfo.si_code == CLD_EXITED) { if (WIFEXITED(status_and_exitcode)) sinfo.si_code = CLD_EXITED; else if (WIFSIGNALED(status_and_exitcode)) { if (WCOREDUMP(status_and_exitcode)) { sinfo.si_code = CLD_DUMPED; status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); } else { sinfo.si_code = CLD_KILLED; status_and_exitcode = W_EXITCODE(status_and_exitcode,status_and_exitcode); } } } /* * The recorded status contains the exit code and the * signal information, but the information to be passed * in the siginfo to the handler is supposed to only * contain the status, so we have to shift it out. 
*/ sinfo.si_status = WEXITSTATUS(status_and_exitcode); break; } } /* copy info out to user space */ if (IS_64BIT_PROCESS(p)) { /* XXX truncates catcher address to uintptr_t */ DTRACE_PROC3(signal__handle, int, sig, siginfo_t *, &sinfo, void (*)(void), CAST_DOWN(sig_t, catcher)); if (copyout(&uctx, p_uctx, sizeof(struct user_ucontext64))) goto bad; if (copyout(&sinfo, p_sinfo, sizeof(user_siginfo_t))) goto bad; } else {