//exc_server uses dlsym to find symbol
// Mach exception callback (dispatched by exc_server) for faults on registered
// threads.  Classifies the faulting address and either throws a Julia-level
// exception into the faulting thread or reports a fatal error with a backtrace.
// Returns KERN_SUCCESS when the fault was consumed, KERN_INVALID_ARGUMENT to
// let the kernel re-deliver it to the next handler.
DLLEXPORT kern_return_t catch_exception_raise(mach_port_t exception_port,
                                              mach_port_t thread,
                                              mach_port_t task,
                                              exception_type_t exception,
                                              exception_data_t code,
                                              mach_msg_type_number_t code_count)
{
    unsigned int count = MACHINE_THREAD_STATE_COUNT;      // in/out size for x86_THREAD_STATE64
    unsigned int exc_count = X86_EXCEPTION_STATE64_COUNT; // in/out size for x86_EXCEPTION_STATE64
    x86_exception_state64_t exc_state;
    x86_thread_state64_t state;
#ifdef LIBOSXUNWIND
    // Faults raised on the profiler thread are handled by the profiler itself.
    if (thread == mach_profiler_thread) {
        return profiler_segv_handler(exception_port, thread, task, exception, code, code_count);
    }
#endif
    // Fetch the exception state to learn the faulting virtual address.
    kern_return_t ret = thread_get_state(thread, x86_EXCEPTION_STATE64, (thread_state_t)&exc_state, &exc_count);
    HANDLE_MACH_ERROR("thread_get_state", ret);
    uint64_t fault_addr = exc_state.__faultvaddr;
#ifdef SEGV_EXCEPTION
    // With SEGV_EXCEPTION all faults are recoverable; the msync probe below
    // distinguishes unmapped pages inside the branch instead.
    if (1) {
#else
    if (msync((void*)(fault_addr & ~(jl_page_size - 1)), 1, MS_ASYNC) == 0) { // check if this was a valid address
#endif
        jl_value_t *excpt;
        if (is_addr_on_stack((void*)fault_addr)) {
            excpt = jl_stackovf_exception;
        }
#ifdef SEGV_EXCEPTION
        else if (msync((void*)(fault_addr & ~(jl_page_size - 1)), 1, MS_ASYNC) != 0) {
            // no page mapped at this address
            excpt = jl_segv_exception;
        }
#endif
        else {
            if (!(exc_state.__err & WRITE_FAULT))
                return KERN_INVALID_ARGUMENT;
                // rethrow the SEGV since it wasn't an error with writing to read-only memory
            excpt = jl_readonlymemory_exception;
        }
        // NOTE(review): the thread id is hard-coded to 0 here — this variant
        // appears to assume a single managed thread; confirm against
        // jl_throw_in_thread's contract.
        jl_throw_in_thread(0, thread, excpt);
        return KERN_SUCCESS;
    }
    else {
        // Unrecoverable: capture the general-purpose state for a backtrace,
        // report, and decline the exception.
        kern_return_t ret = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
        HANDLE_MACH_ERROR("thread_get_state", ret);
        jl_critical_error(SIGSEGV, (unw_context_t*)&state, jl_bt_data, &jl_bt_size);
        return KERN_INVALID_ARGUMENT;
    }
}

// Route the calling thread's EXC_BAD_ACCESS exceptions to segv_port so the
// handler above receives them.
void attach_exception_port()
{
    kern_return_t ret;
    // http://www.opensource.apple.com/source/xnu/xnu-2782.1.97/osfmk/man/thread_set_exception_ports.html
    ret = thread_set_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS, segv_port, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
    HANDLE_MACH_ERROR("thread_set_exception_ports", ret);
}
void CEeExecutor::AddExceptionHandler() { assert(g_eeExecutor == nullptr); g_eeExecutor = this; #if defined(_WIN32) m_handler = AddVectoredExceptionHandler(TRUE, &CEeExecutor::HandleException); assert(m_handler != NULL); #elif defined(__ANDROID__) struct sigaction sigAction; sigAction.sa_handler = nullptr; sigAction.sa_sigaction = &HandleException; sigAction.sa_flags = SA_SIGINFO; sigemptyset(&sigAction.sa_mask); int result = sigaction(SIGSEGV, &sigAction, nullptr); assert(result >= 0); #elif defined(__APPLE__) kern_return_t result = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &m_port); assert(result == KERN_SUCCESS); m_handlerThread = std::thread([this] () { HandlerThreadProc(); }); result = mach_port_insert_right(mach_task_self(), m_port, m_port, MACH_MSG_TYPE_MAKE_SEND); assert(result == KERN_SUCCESS); result = thread_set_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS, m_port, EXCEPTION_STATE | MACH_EXCEPTION_CODES, STATE_FLAVOR); assert(result == KERN_SUCCESS); result = mach_port_mod_refs(mach_task_self(), m_port, MACH_PORT_RIGHT_SEND, -1); assert(result == KERN_SUCCESS); #endif }
void GC_attach_current_thread_exceptions_to_handler() { mach_port_t thread_self, exc_port_s; mach_msg_type_name_t type; kern_return_t retval; if (!task_self) return; /* get ids for ourself */ thread_self = mach_thread_self(); /* extract out the send rights for that port, which the OS needs */ retval = mach_port_extract_right(task_self, exc_port, MACH_MSG_TYPE_MAKE_SEND, &exc_port_s, &type); if(retval != KERN_SUCCESS) { GCPRINT(GCOUTF, "Couldn't extract send rights: %s\n", mach_error_string(retval)); abort(); } /* set the exception ports for this thread to the above */ retval = thread_set_exception_ports(thread_self, EXC_MASK_BAD_ACCESS, exc_port_s, EXCEPTION_DEFAULT, ARCH_THREAD_STATE); if(retval != KERN_SUCCESS) { GCPRINT(GCOUTF, "Couldn't set exception ports: %s\n", mach_error_string(retval)); abort(); } #if defined(MZ_USE_PLACES) register_mach_thread(); #endif }
void darwin_arm_init_thread_exception_port() { // Called by each new OS thread to bind its EXC_BAD_ACCESS exception // to mach_exception_handler_port_set. int ret; mach_port_t port = MACH_PORT_NULL; ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port); if (ret) { fprintf(stderr, "runtime/cgo: mach_port_allocate failed: %d\n", ret); abort(); } ret = mach_port_insert_right( mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND); if (ret) { fprintf(stderr, "runtime/cgo: mach_port_insert_right failed: %d\n", ret); abort(); } ret = thread_set_exception_ports( mach_thread_self(), EXC_MASK_BAD_ACCESS, port, EXCEPTION_DEFAULT, THREAD_STATE_NONE); if (ret) { fprintf(stderr, "runtime/cgo: thread_set_exception_ports failed: %d\n", ret); abort(); } ret = pthread_mutex_lock(&mach_exception_handler_port_set_mu); if (ret) { fprintf(stderr, "runtime/cgo: pthread_mutex_lock failed: %d\n", ret); abort(); } ret = mach_port_move_member( mach_task_self(), port, mach_exception_handler_port_set); if (ret) { fprintf(stderr, "runtime/cgo: mach_port_move_member failed: %d\n", ret); abort(); } ret = pthread_mutex_unlock(&mach_exception_handler_port_set_mu); if (ret) { fprintf(stderr, "runtime/cgo: pthread_mutex_unlock failed: %d\n", ret); abort(); } }
void EstablishDylanExceptionHandlers(void) { if (exception_port == MACH_PORT_NULL) { // Need a port we can receive exceptions on kern_return_t rc = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exception_port); if (rc != KERN_SUCCESS) { mach_error("mach_port_allocate send", rc); abort(); } // Need to be able to send on it too rc = mach_port_insert_right(mach_task_self(), exception_port, exception_port, MACH_MSG_TYPE_MAKE_SEND); if (rc != KERN_SUCCESS) { mach_error("mach_port_insert_right", rc); abort(); } // Spawn a thread to serve exception requests pthread_attr_t attr; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); pthread_t thread; int prc = pthread_create(&thread, &attr, catcher, NULL); pthread_attr_destroy(&attr); if (prc != 0) { fprintf(stderr, "%s: pthread_create returned %d\n", __func__, prc); abort(); } } // Set this thread's exception port kern_return_t rc = thread_set_exception_ports(mach_thread_self(), EXC_MASK_ARITHMETIC|EXC_MASK_BREAKPOINT, exception_port, EXCEPTION_STATE_IDENTITY|MACH_EXCEPTION_CODES, THREAD_STATE_FLAVOR); if (rc != KERN_SUCCESS) { mach_error("thread_set_exception_ports", rc); abort(); } primitive_reset_float_environment(); }
/* tell the kernel that we want EXC_BAD_ACCESS exceptions sent to the
   exception port (which is being listened to do by the mach
   exception handling thread). */
kern_return_t mach_thread_init(mach_port_t thread_exception_port)
{
    kern_return_t err;
    mach_port_t self_thread;

    /* allocate a named port for the thread */
    FSHOW((stderr, "Allocating mach port %x\n", thread_exception_port));
    err = mach_port_allocate_name(current_mach_task,
                                  MACH_PORT_RIGHT_RECEIVE,
                                  thread_exception_port);
    if (err) {
        lose("mach_port_allocate_name failed with return_code %d\n", err);
    }

    /* establish the right for the thread_exception_port to send messages */
    err = mach_port_insert_right(current_mach_task,
                                 thread_exception_port,
                                 thread_exception_port,
                                 MACH_MSG_TYPE_MAKE_SEND);
    if (err) {
        lose("mach_port_insert_right failed with return_code %d\n", err);
    }

    /* deliver this thread's bad-access/bad-instruction exceptions there */
    self_thread = mach_thread_self();
    err = thread_set_exception_ports(self_thread,
                                     EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
                                     thread_exception_port,
                                     EXCEPTION_DEFAULT,
                                     THREAD_STATE_NONE);
    if (err) {
        lose("thread_set_exception_ports failed with return_code %d\n", err);
    }

    /* drop the extra self-thread reference mach_thread_self() created */
    err = mach_port_deallocate(current_mach_task, self_thread);
    if (err) {
        lose("mach_port_deallocate failed with return_code %d\n", err);
    }

    /* add the new port to the handler's port set */
    err = mach_port_move_member(current_mach_task,
                                thread_exception_port,
                                mach_exception_handler_port_set);
    if (err) {
        lose("mach_port_move_member failed with return_code %d\n", err);
    }

    return err;
}
void InstallExceptionHandler() { mach_port_t port; CheckKR("mach_port_allocate", mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port)); std::thread exc_thread(ExceptionThread, port); exc_thread.detach(); // Obtain a send right for thread_set_exception_ports to copy... CheckKR("mach_port_insert_right", mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND)); // Mach tries the following exception ports in order: thread, task, host. // Debuggers set the task port, so we grab the thread port. CheckKR("thread_set_exception_ports", thread_set_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS, port, EXCEPTION_STATE | MACH_EXCEPTION_CODES, x86_THREAD_STATE64)); // ...and get rid of our copy so that MACH_NOTIFY_NO_SENDERS works. CheckKR("mach_port_mod_refs", mach_port_mod_refs(mach_task_self(), port, MACH_PORT_RIGHT_SEND, -1)); mach_port_t previous; CheckKR("mach_port_request_notification", mach_port_request_notification(mach_task_self(), port, MACH_NOTIFY_NO_SENDERS, 0, port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous)); }
/*
 * Grant or revoke "Mach aware" privilege for a task: redirect its thread's
 * EXC_MASK_MACH_SYSCALL exceptions either to the privileged host port
 * (grant) or back to the task's own trap port (revoke).  No-op when the
 * task is already in the requested state.
 */
void osfmach3_trap_mach_aware(
	struct task_struct *task,
	boolean_t mach_aware,
	exception_behavior_t behavior,
	thread_state_flavor_t flavor)
{
	kern_return_t kr;
	mach_port_t trap_port;

	if (task->osfmach3.task->mach_aware == mach_aware)
		return;
	/*
	 * BUG FIX: record the *requested* state.  The original code assigned
	 * TRUE unconditionally, so revoking privileges (mach_aware == FALSE)
	 * left the task permanently marked as Mach-aware.
	 */
	task->osfmach3.task->mach_aware = mach_aware;
	if (mach_aware) {
		/*
		 * Enable Mach privilege for this task.
		 */
		trap_port = mach_host_self();
		printk("Granting Mach access privileges to process %d (%s)\n",
		       task->pid, task->comm);
	} else {
		/*
		 * Revoke Mach privilege for this task.
		 */
		trap_port = task->osfmach3.thread->mach_trap_port;
		printk("Revoking Mach access privileges from process %d (%s)\n",
		       task->pid, task->comm);
	}
	kr = thread_set_exception_ports(task->osfmach3.thread->mach_thread_port,
					EXC_MASK_MACH_SYSCALL,
					trap_port,
					behavior,
					flavor);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_mach_aware: "
				    "thread_set_exception_ports"));
		panic("osfmach3_trap_mach_aware: can't set exception port");
	}
}
void osfmach3_changed_identity( struct task_struct *tsk) { mach_port_t trap_port; kern_return_t kr; if (suser()) { trap_port = mach_host_self(); } else { trap_port = tsk->osfmach3.thread->mach_trap_port; } kr = thread_set_exception_ports(tsk->osfmach3.thread->mach_thread_port, EXC_MASK_MACH_SYSCALL, trap_port, thread_exception_behavior, thread_exception_flavor); if (kr != KERN_SUCCESS) { MACH3_DEBUG(0, kr, ("osfmach3_changed_uid: " "thread_set_exception_ports")); printk("osfmach3_changed_uid: can't set exception port.\n"); } }
/*
 * One-time server bring-up: allocate the server's trap (exception) port,
 * point the server task's exceptions at it, start the exception-catcher
 * service threads, and create the global port on which user tasks report
 * new threads.  The behavior/flavor arguments are cached for later
 * per-thread registrations.
 */
void osfmach3_trap_init(
	exception_behavior_t behavior,
	thread_state_flavor_t flavor)
{
	kern_return_t kr;
	mach_port_t trap_port_name, trap_port;
	exception_mask_t mask;

	/* cache for osfmach3_changed_identity() et al. */
	thread_exception_behavior = behavior;
	thread_exception_flavor = flavor;
	init_task.osfmach3.task->mach_task_port = mach_task_self();
	/* derive a unique port name from the init_task address (+1 to keep it
	 * distinct from the task pointer itself) */
	trap_port_name = ((mach_port_t) &init_task) + 1;
	kr = serv_port_allocate_name(&trap_port, (void *) trap_port_name);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_init: "
				    "serv_port_allocate_name(%x)",
				    trap_port_name));
		panic("osfmach3_trap_init: "
		      "can't allocate exception port");
	}
	init_task.osfmach3.thread->mach_trap_port = trap_port;
	/* send right so the kernel can deliver exception messages */
	kr = mach_port_insert_right(mach_task_self(),
				    trap_port,
				    trap_port,
				    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_init: "
				    "mach_port_insert_right"));
		panic("osfmach3_trap_init: can't insert send right");
	}
	/* catch everything except RPC alerts by default */
	mask = EXC_MASK_ALL & ~EXC_MASK_RPC_ALERT;
	if (parent_server) {
		exception_mask_t syscall_exc_mask;
		exception_mask_t old_exc_mask;
		mach_msg_type_number_t old_exc_count;
		mach_port_t old_exc_port;
		exception_behavior_t old_exc_behavior;
		thread_state_flavor_t old_exc_flavor;

		/*
		 * Don't catch syscall exceptions that are directed to
		 * the parent server. But save the port, behavior and flavor
		 * to be able to restore them later.
		 */
		syscall_exc_mask = parent_server_syscall_exc_mask();
		old_exc_count = 1;
		kr = task_get_exception_ports(mach_task_self(),
					      syscall_exc_mask,
					      &old_exc_mask,
					      &old_exc_count,
					      &old_exc_port,
					      &old_exc_behavior,
					      &old_exc_flavor);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr,
				    ("osfmach3_trap_init(FIRST_TASK): "
				     "task_get_exception_ports(mask=0x%x)",
				     syscall_exc_mask));
			panic("can't get syscall exc port (parent server)");
		}
		if (old_exc_count == 1) {
			parent_server_syscall_port = old_exc_port;
			parent_server_syscall_behavior = old_exc_behavior;
			parent_server_syscall_flavor = old_exc_flavor;
		} else {
			printk("osfmach3_trap_init: "
			       "couldn't get our syscall exc port");
		}
		mask &= ~syscall_exc_mask;
		/* let breakpoints go to the debugger (if any) */
		mask &= ~EXC_MASK_BREAKPOINT;
		/* let Mach syscalls go to Mach */
		mask &= ~EXC_MASK_MACH_SYSCALL;
	}
	/* route the server task's own exceptions to the trap port */
	kr = task_set_exception_ports(mach_task_self(),
				      mask,
				      trap_port,
				      behavior,
				      flavor);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_init: "
				    "task_set_exception_ports"));
		panic("osfmach3_trap_init: "
		      "can't set server's task exception ports");
	}
#if 0	/* obsolete */
	if (parent_server) {
		/*
		 * Hide the EXC_BAD_INSTRUCTION exceptions to avoid
		 * interferences from the parent_server when we do
		 * syscalls to ourselves (see start_kernel() and init()).
		 */
		kr = thread_set_exception_ports(mach_thread_self(),
						EXC_MASK_BAD_INSTRUCTION,
						MACH_PORT_NULL,
						behavior,
						flavor);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(1, kr, ("osfmach3_trap_init: "
					    "thread_set_exception_ports"));
			panic("can't unset thread exception port");
		}
	}
#endif
	ASSERT(server_exception_port == MACH_PORT_NULL);
	server_exception_port = trap_port;
	/* service thread for the server's own exceptions */
	server_thread_start(server_exception_catcher, (void *) 0);

	/*
	 * Create a global exception port for user tasks to detect
	 * new user threads.
	 */
	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE,
				&user_trap_port);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_init: "
				    "mach_port_allocate()"));
		panic("osfmach3_trap_init: "
		      "can't allocate user trap port");
	}
	kr = mach_port_insert_right(mach_task_self(),
				    user_trap_port,
				    user_trap_port,
				    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_init: "
				    "mach_port_insert_right"));
		panic("osfmach3_trap_init: can't insert send right "
		      "for user trap port");
	}
	/* service thread for exceptions raised by user tasks */
	server_thread_start(task_exception_catcher, (void *) 0);
}
/*
 * Per-task exception setup: allocate the task's private trap port, insert
 * a send right for it, and (when the task already has a Mach thread) point
 * that thread's exceptions at the trap port.  The first user process
 * (pid 1) additionally gets a task-wide exception port (user_trap_port)
 * that its descendants inherit, letting the server detect threads created
 * directly via Mach syscalls.  No-op for init_task (handled in
 * osfmach3_trap_init).
 */
void osfmach3_trap_setup_task(
	struct task_struct *task,
	exception_behavior_t behavior,
	thread_state_flavor_t flavor)
{
	kern_return_t kr;
	mach_port_t trap_port_name, trap_port;
	mach_port_t task_port, thread_port;
	exception_mask_t mask;

	if (task == &init_task)
		return;
	task_port = task->osfmach3.task->mach_task_port;
	thread_port = task->osfmach3.thread->mach_thread_port;
	if (task->pid == 1) {
#if 0
		/*
		 * Remove the boostrap port for init.
		 */
		kr = task_set_bootstrap_port(task_port, MACH_PORT_NULL);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(1, kr, ("osfmach3_trap_setup_task: "
					    "task_set_bootstrap_port"));
			panic("osfmach3_trap_setup_task: "
			      "can't unset bootstrap port");
		}
#endif
		/*
		 * For the first process, setup a task exception port
		 * for all exceptions. These exception will be
		 * directed to the global "user_trap_port" and will
		 * allow us to detect threads created by user programs
		 * directly (using Mach system calls).
		 * This exception port will be inherited by the
		 * other tasks on task_create().
		 */
		kr = task_set_exception_ports(task_port,
					      (EXC_MASK_ALL &
					       ~EXC_MASK_RPC_ALERT),
					      user_trap_port,
					      EXCEPTION_STATE_IDENTITY,
					      flavor);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("osfmach3_trap_setup_task: "
					    "task_set_exception_ports"));
			panic("osfmach3_trap_setup_task: "
			      "can't set global user task exception ports");
		}
	}
	/* unique port name derived from the task address (+1 to stay
	 * distinct from the pointer itself) */
	trap_port_name = ((mach_port_t) task) + 1;
	if (use_activations) {
		kr = serv_port_allocate_subsystem(exc_subsystem_port,
						  &trap_port,
						  (void *) trap_port_name);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("osfmach3_trap_setup_task: "
					    "serv_port_allocate_subsystem(%x)",
					    trap_port_name));
			panic("osfmach3_trap_setup_task: "
			      "can't allocate exception port");
		}
	} else {
		kr = serv_port_allocate_name(&trap_port,
					     (void *) trap_port_name);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("osfmach3_trap_setup_task: "
					    "serv_port_allocate_name(%x)",
					    trap_port_name));
			panic("osfmach3_trap_setup_task: "
			      "can't allocate exception port");
		}
	}
	task->osfmach3.thread->mach_trap_port = trap_port;
	task->osfmach3.thread->mach_trap_port_srights = 0;
	/* send right so the kernel can deliver exception messages */
	kr = mach_port_insert_right(mach_task_self(),
				    trap_port,
				    trap_port,
				    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		MACH3_DEBUG(0, kr, ("osfmach3_trap_setup_task: "
				    "mach_port_insert_right"));
		panic("osfmach3_trap_setup_task: can't insert send right");
	}
	if (thread_port != MACH_PORT_NULL) {
		mask = EXC_MASK_ALL & ~EXC_MASK_RPC_ALERT;
		if (suser() || task->osfmach3.task->mach_aware) {
			/* let Mach syscalls go to Mach */
			mask &= ~EXC_MASK_MACH_SYSCALL;
			/* the exception port is inherited from the parent */
		}
		kr = thread_set_exception_ports(thread_port,
						mask,
						trap_port,
						behavior,
						flavor);
		if (kr != KERN_SUCCESS) {
			MACH3_DEBUG(0, kr, ("osfmach3_trap_setup_task: "
					    "thread_set_exception_ports"));
			panic("osfmach3_trap_setup_task: "
			      "can't set user thread exception ports");
		}
	} else {
		/* we'll set the exception ports later... maybe */
	}
	/*
	 * Add the exception port to the port set.
	 * The exceptions from the server task itself go to
	 * the "server_exception_catcher" thread.
	 */
	ux_server_add_port(trap_port);
}
//exc_server uses dlsym to find symbol
// Mach exception callback (dispatched by exc_server) for faults on registered
// threads.  Instead of throwing directly, this variant rewrites the faulting
// thread's register state so that, on resume, it runs the appropriate
// darwin_*_handler on the dedicated signal stack with a saved unw_context_t
// as its argument.  Returns KERN_SUCCESS when the fault was consumed,
// KERN_INVALID_ARGUMENT to let the kernel re-deliver it.
DLLEXPORT kern_return_t catch_exception_raise(mach_port_t exception_port,
                                              mach_port_t thread,
                                              mach_port_t task,
                                              exception_type_t exception,
                                              exception_data_t code,
                                              mach_msg_type_number_t code_count)
{
    unsigned int count = MACHINE_THREAD_STATE_COUNT;      // in/out size for x86_THREAD_STATE64
    unsigned int exc_count = X86_EXCEPTION_STATE64_COUNT; // in/out size for x86_EXCEPTION_STATE64
    x86_thread_state64_t state, old_state;
    x86_exception_state64_t exc_state;
    kern_return_t ret;
    //memset(&state,0,sizeof(x86_thread_state64_t));
    //memset(&exc_state,0,sizeof(x86_exception_state64_t));
#ifdef LIBOSXUNWIND
    // Faults raised on the profiler thread are handled by the profiler itself.
    if (thread == mach_profiler_thread) {
        return profiler_segv_handler(exception_port,thread,task,exception,code,code_count);
    }
#endif
    // Fetch the exception state to learn the faulting virtual address.
    ret = thread_get_state(thread,x86_EXCEPTION_STATE64,(thread_state_t)&exc_state,&exc_count);
    HANDLE_MACH_ERROR("thread_get_state(1)",ret);
    uint64_t fault_addr = exc_state.__faultvaddr;
#ifdef SEGV_EXCEPTION
    // With SEGV_EXCEPTION all faults are recoverable; the msync probe below
    // distinguishes unmapped pages inside the branch instead.
    if (1) {
#else
    if (msync((void*)(fault_addr & ~(jl_page_size - 1)), 1, MS_ASYNC) == 0) { // check if this was a valid address
#endif
        // Snapshot the faulting thread's registers; old_state is preserved so
        // the handler receives the pre-fault context.
        ret = thread_get_state(thread,x86_THREAD_STATE64,(thread_state_t)&state,&count);
        HANDLE_MACH_ERROR("thread_get_state(2)",ret);
        old_state = state;
        // memset(&state,0,sizeof(x86_thread_state64_t));
        // Setup libunwind information: carve an unw_context_t out of the top
        // of the dedicated signal stack and switch rsp onto it.
        state.__rsp = (uint64_t)signal_stack + sig_stack_size;
        state.__rsp -= sizeof(unw_context_t);
        state.__rsp &= -16;
        unw_context_t *uc = (unw_context_t*)state.__rsp;
        state.__rsp -= 512;
        // This is for alignment. In particular note that the sizeof(void*) is necessary
        // since it would usually specify the return address (i.e., we are aligning the call
        // frame to a 16 byte boundary as required by the abi, but the stack pointer
        // to point to the byte beyond that. Not doing this leads to funny behavior on
        // the first access to an external function will fail due to stack misalignment
        state.__rsp &= -16;
        state.__rsp -= sizeof(void*);
        // Seed the context with the pre-fault register file and pass it as
        // the handler's first argument (SysV ABI: rdi).
        memset(uc,0,sizeof(unw_context_t));
        memcpy(uc,&old_state,sizeof(x86_thread_state64_t));
        state.__rdi = (uint64_t)uc;
        // Pick the handler the thread will resume into based on fault class.
        if (is_addr_on_stack((void*)fault_addr)) {
            state.__rip = (uint64_t)darwin_stack_overflow_handler;
        }
#ifdef SEGV_EXCEPTION
        else if (msync((void*)(fault_addr & ~(jl_page_size - 1)), 1, MS_ASYNC) != 0) {
            // no page mapped at this address
            state.__rip = (uint64_t)darwin_segv_handler;
        }
#endif
        else {
            if (!(exc_state.__err & WRITE_FAULT))
                return KERN_INVALID_ARGUMENT;
            // rethrow the SEGV since it wasn't an error with writing to read-only memory
            state.__rip = (uint64_t)darwin_accerr_handler;
        }
        state.__rbp = state.__rsp;
        // Install the rewritten register state; the thread resumes inside the
        // chosen handler when the kernel continues it.
        ret = thread_set_state(thread,x86_THREAD_STATE64,(thread_state_t)&state,count);
        HANDLE_MACH_ERROR("thread_set_state",ret);
        return KERN_SUCCESS;
    }
    else {
        // Unrecoverable: print a backtrace and decline the exception.
        ret = thread_get_state(thread,x86_THREAD_STATE64,(thread_state_t)&state,&count);
        HANDLE_MACH_ERROR("thread_get_state(3)",ret);
        jl_safe_printf("\nsignal (%d): %s\n", SIGSEGV, strsignal(SIGSEGV));
        bt_size = rec_backtrace_ctx(bt_data, MAX_BT_SIZE, (unw_context_t*)&state);
        jlbacktrace();
        return KERN_INVALID_ARGUMENT;
    }
}

// Route the calling thread's EXC_BAD_ACCESS exceptions to segv_port so the
// handler above receives them.
void attach_exception_port()
{
    kern_return_t ret;
    // http://www.opensource.apple.com/source/xnu/xnu-2782.1.97/osfmk/man/thread_set_exception_ports.html
    ret = thread_set_exception_ports(mach_thread_self(),EXC_MASK_BAD_ACCESS,segv_port,EXCEPTION_DEFAULT,MACHINE_THREAD_STATE);
    HANDLE_MACH_ERROR("thread_set_exception_ports",ret);
}
/* this initializes the subsystem (sets the exception port, starts the
   exception handling thread, etc) */
static void macosx_init_exception_handler()
{
  mach_port_t thread_self, exc_port_s;
  mach_msg_type_name_t type;
  kern_return_t retval;

  /* get ids for ourself */
  if(!task_self) task_self = mach_task_self();
  thread_self = mach_thread_self();

  /* allocate the port we're going to get exceptions on */
  retval = mach_port_allocate(task_self, MACH_PORT_RIGHT_RECEIVE, &exc_port);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't allocate exception port: %s\n",
	    mach_error_string(retval));
    abort();
  }

  /* extract out the send rights for that port, which the OS needs */
  retval = mach_port_extract_right(task_self, exc_port,
				   MACH_MSG_TYPE_MAKE_SEND,
				   &exc_port_s, &type);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't extract send rights: %s\n",
	    mach_error_string(retval));
    abort();
  }

  /* set the exception ports for this thread to the above */
  retval = thread_set_exception_ports(thread_self, EXC_MASK_BAD_ACCESS,
				      exc_port_s, EXCEPTION_DEFAULT,
				      ARCH_THREAD_STATE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't set exception ports: %s\n",
	    mach_error_string(retval));
    abort();
  }

#ifdef PPC_HAND_ROLLED_THREAD
  /* Old hand-rolled thread creation. pthread_create is fine for our
     purposes. */
  {
    /* set up the subthread */
    mach_port_t exc_thread;
    ARCH_thread_state_t *exc_thread_state;
    void *subthread_stack;

    retval = thread_create(task_self, &exc_thread);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Couldn't create exception thread: %s\n",
	      mach_error_string(retval));
      abort();
    }
    /* point the new thread's stack pointer near the top of the
       allocation, leaving room for the arg-save area and red zone.
       NOTE(review): arithmetic on void* is a GNU extension, and the
       malloc'd stack/state are never freed — legacy PPC-only path. */
    subthread_stack = (void*)malloc(page_size);
    subthread_stack += (page_size - C_ARGSAVE_LEN - C_RED_ZONE);
    exc_thread_state = (ARCH_thread_state_t*)malloc(sizeof(ARCH_thread_state_t));
    /* srr0 = entry point, r1 = stack pointer (PPC conventions) */
    exc_thread_state->srr0 = (unsigned int)exception_thread;
    exc_thread_state->r1 = (unsigned int)subthread_stack;
    retval = thread_set_state(exc_thread, ARCH_THREAD_STATE,
			      (thread_state_t)exc_thread_state,
			      ARCH_THREAD_STATE_COUNT);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Couldn't set subthread state: %s\n",
	      mach_error_string(retval));
      abort();
    }
    retval = thread_resume(exc_thread);
    if(retval != KERN_SUCCESS) {
      GCPRINT(GCOUTF, "Couldn't resume subthread: %s\n",
	      mach_error_string(retval));
      abort();
    }
  }
#else
  {
    /* modern path: an ordinary pthread runs the exception-message loop */
    pthread_t th;
    pthread_create(&th, NULL, (void *(*)(void *))exception_thread, NULL);
  }
#endif
}
// Add process pid to the thread table. // If it's already there, don't re-add it (unless force != 0). static Thread* addpid(int pid, int force) { int i, j; mach_port_t task; mach_port_t *thread; uint nthread; Thread *ret; static int first = 1; if(first){ // Allocate a port for exception messages and // send all thread exceptions to that port. // The excthread reads that port and signals // us if we are waiting on that thread. pthread_t p; excport = mach_reply_port(); pthread_mutex_init(&mu, nil); pthread_cond_init(&cond, nil); pthread_create(&p, nil, excthread, nil); pthread_create(&p, nil, waitthread, (void*)(uintptr)pid); first = 0; } if(!force){ for(i=0; i<nthr; i++) if(thr[i].pid == pid) return &thr[i]; } if(me(task_for_pid(mach_task_self(), pid, &task)) < 0) return nil; if(me(task_threads(task, &thread, &nthread)) < 0) return nil; mach_port_insert_right(mach_task_self(), excport, excport, MACH_MSG_TYPE_MAKE_SEND); if(me(task_set_exception_ports(task, ExcMask, excport, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE)) < 0){ fprint(2, "warning: cannot set excport: %r\n"); } ret = nil; for(j=0; j<nthread; j++){ if(force){ // If we're forcing a refresh, don't re-add existing threads. for(i=0; i<nthr; i++) if(thr[i].pid == pid && thr[i].thread == thread[j]){ if(ret == nil) ret = &thr[i]; goto skip; } } if(nthr >= nelem(thr)) return nil; // TODO: We probably should save the old thread exception // ports for each bit and then put them back when we exit. // Probably the BSD signal handlers have put stuff there. mach_port_insert_right(mach_task_self(), excport, excport, MACH_MSG_TYPE_MAKE_SEND); if(me(thread_set_exception_ports(thread[j], ExcMask, excport, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE)) < 0){ fprint(2, "warning: cannot set excport: %r\n"); } thr[nthr].pid = pid; thr[nthr].task = task; thr[nthr].thread = thread[j]; if(ret == nil) ret = &thr[nthr]; nthr++; skip:; } return ret; }
//exc_server uses dlsym to find symbol
// Mach exception callback (dispatched by exc_server) for faults on registered
// threads, threading-aware variant.  Maps the Mach thread back to a Julia
// thread id, services GC-safepoint faults by suspending the thread, and
// otherwise classifies the fault and throws a Julia exception into the
// faulting thread.  Returns KERN_SUCCESS when consumed, KERN_INVALID_ARGUMENT
// to let the kernel re-deliver the exception.
JL_DLLEXPORT kern_return_t catch_exception_raise(mach_port_t exception_port,
                                                 mach_port_t thread,
                                                 mach_port_t task,
                                                 exception_type_t exception,
                                                 exception_data_t code,
                                                 mach_msg_type_number_t code_count)
{
    unsigned int count = MACHINE_THREAD_STATE_COUNT;      // in/out size for x86_THREAD_STATE64
    unsigned int exc_count = X86_EXCEPTION_STATE64_COUNT; // in/out size for x86_EXCEPTION_STATE64
    x86_exception_state64_t exc_state;
    x86_thread_state64_t state;
#ifdef LIBOSXUNWIND
    // Faults raised on the profiler thread are handled by the profiler itself.
    if (thread == mach_profiler_thread) {
        return profiler_segv_handler(exception_port, thread, task, exception, code, code_count);
    }
#endif
    int16_t tid;
#ifdef JULIA_ENABLE_THREADING
    // Translate the Mach thread port into a Julia thread id / TLS block.
    jl_tls_states_t *ptls = NULL;
    for (tid = 0;tid < jl_n_threads;tid++) {
        if (pthread_mach_thread_np(jl_all_task_states[tid].system_id) == thread) {
            ptls = jl_all_task_states[tid].ptls;
            break;
        }
    }
    if (!ptls) {
        // We don't know about this thread, let the kernel try another handler
        // instead. This shouldn't actually happen since we only register the
        // handler for the threads we know about.
        jl_safe_printf("ERROR: Exception handler triggered on unmanaged thread.\n");
        return KERN_INVALID_ARGUMENT;
    }
#else
    jl_tls_states_t *ptls = &jl_tls_states;
    tid = 0;
#endif
    // Fetch the exception state to learn the faulting virtual address.
    kern_return_t ret = thread_get_state(thread, x86_EXCEPTION_STATE64, (thread_state_t)&exc_state, &exc_count);
    HANDLE_MACH_ERROR("thread_get_state", ret);
    uint64_t fault_addr = exc_state.__faultvaddr;
#ifdef JULIA_ENABLE_THREADING
    // A fault on the GC signal page is a safepoint hit, not an error:
    // park the thread until the GC releases it.
    if (fault_addr == (uintptr_t)jl_gc_signal_page) {
        JL_LOCK_NOGC(gc_suspend);
        if (!jl_gc_safepoint_activated) {
            // GC is done before we get the message, do nothing and return
            JL_UNLOCK_NOGC(gc_suspend);
            return KERN_SUCCESS;
        }
        // Otherwise, set the gc state of the thread, suspend and record it
        int8_t gc_state = ptls->gc_state;
        ptls->gc_state = JL_GC_STATE_WAITING;
        // Pack tid (low bits) and prior gc_state (bits >=16) into one word.
        uintptr_t item = tid | (((uintptr_t)gc_state) << 16);
        arraylist_push(&suspended_threads, (void*)item);
        thread_suspend(thread);
        JL_UNLOCK_NOGC(gc_suspend);
        return KERN_SUCCESS;
    }
#endif
#ifdef SEGV_EXCEPTION
    // With SEGV_EXCEPTION all faults are recoverable; the msync probe below
    // distinguishes unmapped pages inside the branch instead.
    if (1) {
#else
    if (msync((void*)(fault_addr & ~(jl_page_size - 1)), 1, MS_ASYNC) == 0) { // check if this was a valid address
#endif
        jl_value_t *excpt;
        if (is_addr_on_stack(ptls, (void*)fault_addr)) {
            excpt = jl_stackovf_exception;
        }
#ifdef SEGV_EXCEPTION
        else if (msync((void*)(fault_addr & ~(jl_page_size - 1)), 1, MS_ASYNC) != 0) {
            // no page mapped at this address
            excpt = jl_segv_exception;
        }
#endif
        else {
            if (!(exc_state.__err & WRITE_FAULT))
                return KERN_INVALID_ARGUMENT;
            // rethrow the SEGV since it wasn't an error with writing to read-only memory
            excpt = jl_readonlymemory_exception;
        }
        jl_throw_in_thread(tid, thread, excpt);
        return KERN_SUCCESS;
    }
    else {
        // Unrecoverable: capture the general-purpose state for a backtrace,
        // report, and decline the exception.
        kern_return_t ret = thread_get_state(thread, x86_THREAD_STATE64, (thread_state_t)&state, &count);
        HANDLE_MACH_ERROR("thread_get_state", ret);
        jl_critical_error(SIGSEGV, (unw_context_t*)&state, ptls->bt_data, &ptls->bt_size);
        return KERN_INVALID_ARGUMENT;
    }
}

// Route the given thread's EXC_BAD_ACCESS exceptions to segv_port so the
// handler above receives them.
static void attach_exception_port(thread_port_t thread)
{
    kern_return_t ret;
    // http://www.opensource.apple.com/source/xnu/xnu-2782.1.97/osfmk/man/thread_set_exception_ports.html
    ret = thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS, segv_port, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
    HANDLE_MACH_ERROR("thread_set_exception_ports", ret);
}