/*
 *	Routine:	exception
 *	Purpose:
 *		Deliver an exception to the current thread's
 *		thread-level exception port, falling back to the
 *		task-level port (via exception_try_task) if the
 *		thread port is unset or dead.
 *	Conditions:
 *		Nothing locked on entry.  Does not return to the
 *		caller: control continues in exception_raise or in
 *		exception_try_task.
 */
void exception( integer_t _exception, integer_t code, integer_t subcode) {
	ipc_thread_t self = current_thread();
	ipc_port_t exc_port;

	/* KERN_SUCCESS is reserved as the "no saved exception" sentinel
	 * (see ith_exc below), so raising it would be a kernel bug. */
	if (_exception == KERN_SUCCESS)
		panic("exception");

	/*
	 * Optimized version of retrieve_thread_exception.
	 */

	ith_lock(self);
	assert(self->ith_self != IP_NULL);
	exc_port = self->ith_exception;
	if (!IP_VALID(exc_port)) {
		/* No thread exception port; try the task's port instead. */
		ith_unlock(self);
		exception_try_task(_exception, code, subcode);
		/*NOTREACHED*/
	}

	/* Lock the port BEFORE dropping the thread lock, so the port
	 * can't be destroyed out from under us during the hand-off. */
	ip_lock(exc_port);
	ith_unlock(self);
	if (!ip_active(exc_port)) {
		/* Port died; fall back to the task-level port. */
		ip_unlock(exc_port);
		exception_try_task(_exception, code, subcode);
		/*NOTREACHED*/
	}

	/*
	 * Make a naked send right for the exception port.
	 */

	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	/*
	 * If this exception port doesn't work,
	 * we will want to try the task's exception port.
	 * Indicate this by saving the exception state.
	 */

	self->ith_exc = _exception;
	self->ith_exc_code = code;
	self->ith_exc_subcode = subcode;

	/* Upcall to the exception server; never returns here. */
	exception_raise(exc_port,
			retrieve_thread_self_fast(self),
			retrieve_task_self_fast(self->task),
			_exception, code, subcode);
	/*NOTREACHED*/
}
/*
 *	Routine:	exception_try_task
 *	Purpose:
 *		Deliver an exception to the current task's exception
 *		port.  This is the fallback after the thread-level
 *		port has failed (or was never set); if the task port
 *		is also unusable, the thread is out of options and
 *		exception_no_server finishes it off.
 *	Conditions:
 *		Nothing locked on entry.  Does not return to the
 *		caller: control continues in exception_raise or in
 *		exception_no_server.
 */
void exception_try_task( integer_t _exception, integer_t code, integer_t subcode) {
	ipc_thread_t self = current_thread();
	task_t task = self->task;
	ipc_port_t exc_port;

	/*
	 * Optimized version of retrieve_task_exception.
	 */

	itk_lock(task);
	assert(task->itk_self != IP_NULL);
	exc_port = task->itk_exception;
	if (!IP_VALID(exc_port)) {
		/* No task exception port either; no server can help. */
		itk_unlock(task);
		exception_no_server();
		/*NOTREACHED*/
	}

	/* Lock the port BEFORE dropping the task lock, so the port
	 * can't be destroyed out from under us during the hand-off. */
	ip_lock(exc_port);
	itk_unlock(task);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		exception_no_server();
		/*NOTREACHED*/
	}

	/*
	 * Make a naked send right for the exception port.
	 */

	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	/*
	 * This is the thread's last chance.
	 * Clear the saved exception state.
	 */

	self->ith_exc = KERN_SUCCESS;

	/* Upcall to the exception server; never returns here. */
	exception_raise(exc_port,
			retrieve_thread_self_fast(self),
			retrieve_task_self_fast(task),
			_exception, code, subcode);
	/*NOTREACHED*/
}
/*
 *	Routine:	thread_self_trap [mach trap]
 *	Purpose:
 *		Give the calling thread a send right for its own
 *		thread self port, copied out into the caller's
 *		IPC space as a port name.
 *	Conditions:
 *		Nothing locked.
 */
mach_port_name_t
thread_self_trap(
	__unused struct thread_self_trap_args *args)
{
	thread_t cur_thread = current_thread();

	/* Grab a naked send right for the thread's self port... */
	ipc_port_t self_right = retrieve_thread_self_fast(cur_thread);

	/* ...and turn it into a name in the caller's space. */
	return ipc_port_copyout_send(self_right,
	    cur_thread->task->itk_space);
}
/*
 * Routine:	exception_deliver
 * Purpose:
 *	Make an upcall to the exception server provided.
 * Conditions:
 *	Nothing locked and no resources held.
 *	Called from an exception context, so
 *	thread_exception_return and thread_kdb_return
 *	are possible.
 * Returns:
 *	KERN_SUCCESS if the exception was handled
 */
kern_return_t
exception_deliver(
	thread_t		thread,
	exception_type_t	exception,
	mach_exception_data_t	code,
	mach_msg_type_number_t  codeCnt,
	struct exception_action *excp,
	lck_mtx_t		*mutex)
{
	ipc_port_t		exc_port;
	/* Narrowed (32-bit) copy of code[], used when the registered
	 * behavior does not request MACH_EXCEPTION_CODES. */
	exception_data_type_t	small_code[EXCEPTION_CODE_MAX];
	int			code64;
	int			behavior;
	int			flavor;
	kern_return_t		kr;

	/*
	 * Save work if we are terminating.
	 * Just go back to our AST handler.
	 */
	if (!thread->active)
		return KERN_SUCCESS;

	/*
	 * Snapshot the exception action data under lock for consistency.
	 * Hold a reference to the port over the exception_raise_* calls
	 * so it can't be destroyed.  This seems like overkill, but keeps
	 * the port from disappearing between now and when
	 * ipc_object_copyin_from_kernel is finally called.
	 */
	lck_mtx_lock(mutex);
	exc_port = excp->port;
	if (!IP_VALID(exc_port)) {
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	ip_lock(exc_port);
	if (!ip_active(exc_port)) {
		ip_unlock(exc_port);
		lck_mtx_unlock(mutex);
		return KERN_FAILURE;
	}
	/* Take a port reference and a naked send right while locked. */
	ip_reference(exc_port);
	exc_port->ip_srights++;
	ip_unlock(exc_port);

	/* Snapshot flavor/behavior before dropping the action mutex. */
	flavor = excp->flavor;
	behavior = excp->behavior;
	lck_mtx_unlock(mutex);

	/* MACH_EXCEPTION_CODES flags 64-bit code delivery; strip it so
	 * the switch below sees only the base behavior. */
	code64 = (behavior & MACH_EXCEPTION_CODES);
	behavior &= ~MACH_EXCEPTION_CODES;

	if (!code64) {
		/* Narrow the 64-bit codes for the legacy 32-bit interface. */
		small_code[0] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[0]);
		small_code[1] = CAST_DOWN_EXPLICIT(exception_data_type_t, code[1]);
	}

	switch (behavior) {
	case EXCEPTION_STATE: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state++;	/* statistics counter */
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor,
				      (thread_state_t)state,
				      &state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state(exc_port,
						exception,
						code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			} else {
				kr = exception_raise_state(exc_port, exception,
						small_code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			}
			/* On success, install the (possibly modified) state
			 * the server handed back. */
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(thread, flavor,
						(thread_state_t)state,
						state_cnt);
		}

		return kr;
	}

	case EXCEPTION_DEFAULT:
		c_thr_exc_raise++;	/* statistics counter */
		if (code64) {
			kr = mach_exception_raise(exc_port,
					retrieve_thread_self_fast(thread),
					retrieve_task_self_fast(thread->task),
					exception,
					code,
					codeCnt);
		} else {
			kr = exception_raise(exc_port,
					retrieve_thread_self_fast(thread),
					retrieve_task_self_fast(thread->task),
					exception,
					small_code,
					codeCnt);
		}

		return kr;

	case EXCEPTION_STATE_IDENTITY: {
		mach_msg_type_number_t state_cnt;
		thread_state_data_t state;

		c_thr_exc_raise_state_id++;	/* statistics counter */
		state_cnt = _MachineStateCount[flavor];
		kr = thread_getstatus(thread, flavor,
				(thread_state_t)state,
				&state_cnt);
		if (kr == KERN_SUCCESS) {
			if (code64) {
				kr = mach_exception_raise_state_identity(
						exc_port,
						retrieve_thread_self_fast(thread),
						retrieve_task_self_fast(thread->task),
						exception,
						code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			} else {
				kr = exception_raise_state_identity(exc_port,
						retrieve_thread_self_fast(thread),
						retrieve_task_self_fast(thread->task),
						exception,
						small_code,
						codeCnt,
						&flavor,
						state, state_cnt,
						state, &state_cnt);
			}
			/* On success, install the (possibly modified) state
			 * the server handed back. */
			if (kr == MACH_MSG_SUCCESS)
				kr = thread_setstatus(thread, flavor,
						(thread_state_t)state,
						state_cnt);
		}

		return kr;
	}

	default:
		panic ("bad exception behavior!");
		return KERN_FAILURE;
	}/* switch */
}