static void allocate_segv_handler() { #ifdef JULIA_ENABLE_THREADING arraylist_new(&suspended_threads, jl_n_threads); #endif pthread_t thread; pthread_attr_t attr; kern_return_t ret; mach_port_t self = mach_task_self(); ret = mach_port_allocate(self, MACH_PORT_RIGHT_RECEIVE, &segv_port); HANDLE_MACH_ERROR("mach_port_allocate",ret); ret = mach_port_insert_right(self, segv_port, segv_port, MACH_MSG_TYPE_MAKE_SEND); HANDLE_MACH_ERROR("mach_port_insert_right",ret); // Alright, create a thread to serve as the listener for exceptions if (pthread_attr_init(&attr) != 0) { jl_error("pthread_attr_init failed"); } pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); if (pthread_create(&thread, &attr, mach_segv_listener, NULL) != 0) { jl_error("pthread_create failed"); } pthread_attr_destroy(&attr); for (int16_t tid = 0;tid < jl_n_threads;tid++) { attach_exception_port(pthread_mach_thread_np(jl_all_task_states[tid].system_id)); } }
static void allocate_segv_handler() { pthread_t thread; pthread_attr_t attr; kern_return_t ret; mach_port_t self = mach_task_self(); ret = mach_port_allocate(self, MACH_PORT_RIGHT_RECEIVE, &segv_port); HANDLE_MACH_ERROR("mach_port_allocate",ret); ret = mach_port_insert_right(self, segv_port, segv_port, MACH_MSG_TYPE_MAKE_SEND); HANDLE_MACH_ERROR("mach_port_insert_right",ret); // Alright, create a thread to serve as the listener for exceptions if (pthread_attr_init(&attr) != 0) { jl_error("pthread_attr_init failed"); } pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); if (pthread_create(&thread, &attr, mach_segv_listener, NULL) != 0) { jl_error("pthread_create failed"); } pthread_attr_destroy(&attr); attach_exception_port(); }
void *mach_profile_listener(void *arg) { (void)arg; int max_size = 512; attach_exception_port(); mach_profiler_thread = mach_thread_self(); mig_reply_error_t *bufRequest = (mig_reply_error_t *) malloc(max_size); while (1) { kern_return_t ret = mach_msg(&bufRequest->Head, MACH_RCV_MSG, 0, max_size, profile_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL); HANDLE_MACH_ERROR("mach_msg",ret); if (bt_size_cur < bt_size_max) { kern_return_t ret; // Suspend the thread so we may safely sample it ret = thread_suspend(main_thread); HANDLE_MACH_ERROR("thread_suspend",ret); // Do the actual sampling unsigned int count = MACHINE_THREAD_STATE_COUNT; x86_thread_state64_t state; // Get the state of the suspended thread ret = thread_get_state(main_thread,x86_THREAD_STATE64,(thread_state_t)&state,&count); HANDLE_MACH_ERROR("thread_get_state",ret); // Initialize the unwind context with the suspend thread's state unw_context_t uc; memset(&uc,0,sizeof(unw_context_t)); memcpy(&uc,&state,sizeof(x86_thread_state64_t)); /* * Unfortunately compact unwind info is incorrectly generated for quite a number of * libraries by quite a large number of compilers. We can fall back to DWARF unwind info * in some cases, but in quite a number of cases (especially libraries not compiled in debug * mode, only the compact unwind info may be available). Even more unfortunately, there is no * way to detect such bogus compact unwind info (other than noticing the resulting segfault). * What we do here is ugly, but necessary until the compact unwind info situation improves. * We try to use the compact unwind info and if that results in a segfault, we retry with DWARF info. * Note that in a small number of cases this may result in bogus stack traces, but at least the topmost * entry will always be correct, and the number of cases in which this is an issue is rather small. 
* Other than that, this implementation is not incorrect as the other thread is paused while we are profiling * and during stack unwinding we only ever read memory, but never write it. */ forceDwarf = 0; unw_getcontext(&profiler_uc); if (forceDwarf == 0) { // Save the backtrace bt_size_cur += rec_backtrace_ctx((ptrint_t*)bt_data_prof+bt_size_cur, bt_size_max-bt_size_cur-1, &uc); } else if (forceDwarf == 1) { bt_size_cur += rec_backtrace_ctx_dwarf((ptrint_t*)bt_data_prof+bt_size_cur, bt_size_max-bt_size_cur-1, &uc); } else if (forceDwarf == -1) { jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n"); } forceDwarf = -2; // Mark the end of this block with 0 bt_data_prof[bt_size_cur] = 0; bt_size_cur++; // We're done! Resume the thread. ret = thread_resume(main_thread); HANDLE_MACH_ERROR("thread_resume",ret) if (running) { // Reset the alarm ret = clock_alarm(clk, TIME_RELATIVE, timerprof, profile_port); HANDLE_MACH_ERROR("clock_alarm",ret) } } }
// Profiler listener thread (multi-thread variant): blocks on profile_port
// waiting for clock_alarm messages and, on each tick, samples every Julia
// thread in reverse order -- suspend, record a backtrace into bt_data_prof,
// resume -- then re-arms the alarm.
void *mach_profile_listener(void *arg)
{
    (void)arg;
    int i;
    const int max_size = 512;
    attach_exception_port();
#ifdef LIBOSXUNWIND
    mach_profiler_thread = mach_thread_self();
#endif
    // NOTE(review): malloc result is not checked before use in mach_msg.
    mig_reply_error_t *bufRequest = (mig_reply_error_t *) malloc(max_size);
    while (1) {
        // Block until the profiling alarm message arrives.
        kern_return_t ret = mach_msg(&bufRequest->Head, MACH_RCV_MSG,
                                     0, max_size, profile_port,
                                     MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        HANDLE_MACH_ERROR("mach_msg", ret);
        // sample each thread, round-robin style in reverse order
        // (so that thread zero gets notified last)
        for (i = jl_n_threads; i-- > 0; ) {
            // if there is no space left, break early
            if (bt_size_cur >= bt_size_max - 1)
                break;
            // Suspend thread i and obtain an unwind context for its current
            // register state (uc is owned by the suspend machinery).
            unw_context_t *uc;
            jl_thread_suspend_and_get_state(i, &uc, -1);
#ifdef LIBOSXUNWIND
            /*
             * Unfortunately compact unwind info is incorrectly generated for quite a number of
             * libraries by quite a large number of compilers. We can fall back to DWARF unwind info
             * in some cases, but in quite a number of cases (especially libraries not compiled in debug
             * mode, only the compact unwind info may be available). Even more unfortunately, there is no
             * way to detect such bogus compact unwind info (other than noticing the resulting segfault).
             * What we do here is ugly, but necessary until the compact unwind info situation improves.
             * We try to use the compact unwind info and if that results in a segfault, we retry with DWARF info.
             * Note that in a small number of cases this may result in bogus stack traces, but at least the topmost
             * entry will always be correct, and the number of cases in which this is an issue is rather small.
             * Other than that, this implementation is not incorrect as the other thread is paused while we are profiling
             * and during stack unwinding we only ever read memory, but never write it.
             */
            forceDwarf = 0;
            unw_getcontext(&profiler_uc); // will resume from this point if the next lines segfault at any point
            if (forceDwarf == 0) {
                // Save the backtrace
                bt_size_cur += rec_backtrace_ctx((ptrint_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc);
            }
            else if (forceDwarf == 1) {
                // Compact unwind info faulted; retry with DWARF unwinding.
                bt_size_cur += rec_backtrace_ctx_dwarf((ptrint_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc);
            }
            else if (forceDwarf == -1) {
                jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n");
            }
            forceDwarf = -2;
#else
            // Without libosxunwind there is no compact-unwind fallback dance.
            bt_size_cur += rec_backtrace_ctx((ptrint_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc);
#endif
            // Mark the end of this block with 0
            bt_data_prof[bt_size_cur++] = 0;
            // We're done! Resume the thread.
            jl_thread_resume(i, 0);
            if (running) {
                // Reset the alarm
                kern_return_t ret = clock_alarm(clk, TIME_RELATIVE, timerprof, profile_port);
                HANDLE_MACH_ERROR("clock_alarm", ret)
            }
        }
    }
}