bool ksthread_getThreadName(const KSThread thread, char* const buffer, int bufLength)
{
    // WARNING: This implementation is no longer async-safe!

    const pthread_t pthread = pthread_from_mach_thread_np((thread_t)thread);
    return pthread_getname_np(pthread, buffer, (unsigned)bufLength) == 0;
}
void ksmach_init(void)
{
    static volatile sig_atomic_t initialized = 0;
    if(!initialized)
    {
        kern_return_t kr;
        const task_t thisTask = mach_task_self();
        thread_act_array_t threads;
        mach_msg_type_number_t numThreads;

        if((kr = task_threads(thisTask, &threads, &numThreads)) != KERN_SUCCESS)
        {
            KSLOG_ERROR("task_threads: %s", mach_error_string(kr));
            return;
        }

        g_topThread = pthread_from_mach_thread_np(threads[0]);

        for(mach_msg_type_number_t i = 0; i < numThreads; i++)
        {
            mach_port_deallocate(thisTask, threads[i]);
        }
        vm_deallocate(thisTask, (vm_address_t)threads, sizeof(thread_t) * numThreads);

        initialized = true;
    }
}
static int getThreadContext(J9ThreadWalkState *state)
{
    int ret = 0;
    char buffer[1];
    PlatformWalkData *data = (PlatformWalkData *)state->platform_data;
    thread_port_t thread = data->threadList[data->threadIndex];

    /* Create a pipe to allow the resumed thread to report it has completed
     * sending its context info.
     */
    ret = pipe(pipeFileDescriptor);

    if (0 == ret) {
        ret = pthread_kill(pthread_from_mach_thread_np(thread), SUSPEND_SIG);
    }

    if (0 == ret) {
        /* Resume the thread, with the signal pending. */
        if (KERN_SUCCESS != thread_resume(thread)) {
            ret = -1;
        }
    }

    if (0 == ret) {
        /* Wait for the signal handler to complete. */
        if (0 == read(pipeFileDescriptor[0], buffer, 1)) {
            ret = -1;
        }
    }

    return ret;
}
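The helper above depends on a file-scope pipe (pipeFileDescriptor) and a SUSPEND_SIG handler that are not part of the snippet. A minimal sketch of what the handler side could look like, assuming its only jobs are to capture the resumed thread's context and report completion through the pipe; the names and the context handling here are illustrative placeholders, not the actual OMR implementation:

#include <signal.h>
#include <unistd.h>

/* Assumed file-scope pipe shared between getThreadContext() and the handler. */
static int pipeFileDescriptor[2];

/* Hypothetical SUSPEND_SIG handler, installed with sigaction() and SA_SIGINFO. */
static void suspendSignalHandler(int sig, siginfo_t *info, void *contextArg)
{
    (void)sig;
    (void)info;
    (void)contextArg; /* the real handler would read the register context from here */

    /* Unblock getThreadContext(), which is waiting in read(). */
    char done = 0;
    write(pipeFileDescriptor[1], &done, 1);
}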
bool ksmach_getThreadName(const thread_t thread, char* const buffer, size_t bufLength)
{
    // WARNING: This implementation is no longer async-safe!

    const pthread_t pthread = pthread_from_mach_thread_np(thread);
    return pthread_getname_np(pthread, buffer, bufLength) == 0;
}
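For context, a self-contained sketch of the same pattern: enumerate the task's threads with task_threads() and read each thread's name through pthread_from_mach_thread_np() plus pthread_getname_np(). Only public Mach and pthread APIs are used; the helper name printAllThreadNames is made up for the example, and like the functions above it is not async-signal-safe.

#include <mach/mach.h>
#include <pthread.h>
#include <stdio.h>

/* Hypothetical helper: print the pthread name of every thread in the current task. */
static void printAllThreadNames(void)
{
    thread_act_array_t threads;
    mach_msg_type_number_t count;
    const task_t task = mach_task_self();

    if (task_threads(task, &threads, &count) != KERN_SUCCESS) {
        return;
    }

    for (mach_msg_type_number_t i = 0; i < count; i++) {
        char name[64] = {0};
        const pthread_t pthread = pthread_from_mach_thread_np(threads[i]);
        if (pthread != NULL && pthread_getname_np(pthread, name, sizeof(name)) == 0) {
            printf("thread %u: \"%s\"\n", threads[i], name);
        }
        mach_port_deallocate(task, threads[i]);
    }
    vm_deallocate(task, (vm_address_t)threads, count * sizeof(thread_act_t));
}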
bool ksmach_getThreadQueueName(const thread_t thread, char* const buffer, size_t bufLength)
{
    struct internal_dispatch_queue_s* pQueue;
    struct internal_dispatch_queue_s queue;

    if(bufLength > sizeof(queue.dq_label))
    {
        bufLength = sizeof(queue.dq_label);
    }

    // Recast the opaque thread to our hacky internal thread structure pointer.
    const pthread_t pthread = pthread_from_mach_thread_np(thread);
    const internal_pthread_t threadStruct = (internal_pthread_t)pthread;

    if(ksmach_copyMem(&threadStruct->tsd[dispatch_queue_key], &pQueue, sizeof(pQueue)) != KERN_SUCCESS)
    {
        KSLOG_TRACE("Could not copy queue pointer from %p", &threadStruct->tsd[dispatch_queue_key]);
        return false;
    }

    if(pQueue == NULL)
    {
        KSLOG_TRACE("Queue pointer is NULL");
        return false;
    }

    if(ksmach_copyMem(pQueue, &queue, sizeof(queue)) != KERN_SUCCESS)
    {
        KSLOG_TRACE("Could not copy queue data from %p", pQueue);
        return false;
    }

    // Queue label must be a null terminated string.
    int iLabel;
    for(iLabel = 0; iLabel < (int)sizeof(queue.dq_label); iLabel++)
    {
        if(queue.dq_label[iLabel] < ' ' || queue.dq_label[iLabel] > '~')
        {
            break;
        }
    }
    // Reject labels that are unterminated or that stopped on a non-null, invalid char.
    if(iLabel == (int)sizeof(queue.dq_label) || queue.dq_label[iLabel] != 0)
    {
        KSLOG_TRACE("Queue label contains invalid chars");
        return false;
    }

    strncpy(buffer, queue.dq_label, bufLength);
    KSLOG_TRACE("Queue label = %s", buffer);
    return true;
}
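The function above digs through private pthread and libdispatch structures so that it can inspect an arbitrary (possibly suspended) thread. When only the calling thread's queue is of interest, the public API is enough on recent OS versions (DISPATCH_CURRENT_QUEUE_LABEL requires roughly macOS 10.9 / iOS 7 or later); a minimal sketch, with getCurrentQueueName as a hypothetical helper name:

#include <dispatch/dispatch.h>
#include <stdbool.h>
#include <string.h>

/* Sketch: copy the current dispatch queue's label using only public API.
 * Works only for the calling thread, unlike ksmach_getThreadQueueName() above. */
static bool getCurrentQueueName(char* buffer, size_t bufLength)
{
    const char* label = dispatch_queue_get_label(DISPATCH_CURRENT_QUEUE_LABEL);
    if (label == NULL || bufLength == 0) {
        return false;
    }
    strlcpy(buffer, label, bufLength);
    return true;
}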
static void call_fault_handler(mach_port_t thread,
                               exception_type_t exception,
                               exception_data_type_t code,
                               MACH_EXC_STATE_TYPE* exc_state,
                               MACH_THREAD_STATE_TYPE* thread_state,
                               MACH_FLOAT_STATE_TYPE* float_state)
{
    /* Look up the VM instance involved */
    THREADHANDLE thread_id = pthread_from_mach_thread_np(thread);
    FACTOR_ASSERT(thread_id);
    std::map<THREADHANDLE, factor_vm*>::const_iterator vm = thread_vms.find(thread_id);

    /* Handle the exception */
    if (vm != thread_vms.end())
        vm->second->call_fault_handler(exception, code, exc_state, thread_state, float_state);
}
static inline dispatch_introspection_queue_function_s
_dispatch_introspection_continuation_get_info(dispatch_queue_t dq,
        dispatch_continuation_t dc, unsigned long *type)
{
    void *ctxt = dc->dc_ctxt;
    dispatch_function_t func = dc->dc_func;
    pthread_t waiter = NULL;
    bool apply = false;
    long flags = (long)dc->do_vtable;
    if (flags & DISPATCH_OBJ_SYNC_SLOW_BIT) {
        waiter = pthread_from_mach_thread_np((mach_port_t)dc->dc_data);
        if (flags & DISPATCH_OBJ_BARRIER_BIT) {
            dc = dc->dc_ctxt;
            dq = dc->dc_data;
        }
        ctxt = dc->dc_ctxt;
        func = dc->dc_func;
    }
    if (func == _dispatch_sync_recurse_invoke) {
        dc = dc->dc_ctxt;
        dq = dc->dc_data;
        ctxt = dc->dc_ctxt;
        func = dc->dc_func;
    } else if (func == _dispatch_async_redirect_invoke) {
        dq = dc->dc_data;
        dc = dc->dc_other;
        ctxt = dc->dc_ctxt;
        func = dc->dc_func;
        flags = (long)dc->do_vtable;
    } else if (func == _dispatch_mach_barrier_invoke) {
        dq = dq->do_targetq;
        ctxt = dc->dc_data;
        func = dc->dc_other;
    } else if (func == _dispatch_apply_invoke ||
            func == _dispatch_apply_redirect_invoke) {
        dispatch_apply_t da = ctxt;
        if (da->da_todo) {
            dc = da->da_dc;
            if (func == _dispatch_apply_redirect_invoke) {
                dq = dc->dc_data;
            }
            ctxt = dc->dc_ctxt;
            func = dc->dc_func;
            apply = true;
        }
    }
    if (func == _dispatch_call_block_and_release) {
        *type = dispatch_introspection_queue_item_type_block;
        func = _dispatch_Block_invoke(ctxt);
    } else {
        *type = dispatch_introspection_queue_item_type_function;
    }
    dispatch_introspection_queue_function_s diqf = {
        .continuation = dc,
        .target_queue = dq,
        .context = ctxt,
        .function = func,
        .group = flags & DISPATCH_OBJ_GROUP_BIT ? dc->dc_data : NULL,
        .waiter = waiter,
        .barrier = flags & DISPATCH_OBJ_BARRIER_BIT,
        .sync = flags & DISPATCH_OBJ_SYNC_SLOW_BIT,
        .apply = apply,
    };
    return diqf;
}

static inline dispatch_introspection_object_s
_dispatch_introspection_object_get_info(dispatch_object_t dou)
{
    dispatch_introspection_object_s dio = {
        .object = dou._dc,
        .target_queue = dou._do->do_targetq,
        .type = (void*)dou._do->do_vtable,
        .kind = dx_kind(dou._do),
    };
    return dio;
}

DISPATCH_USED inline dispatch_introspection_queue_s
dispatch_introspection_queue_get_info(dispatch_queue_t dq)
{
    bool global = (dq->do_xref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT) ||
            (dq->do_ref_cnt == DISPATCH_OBJECT_GLOBAL_REFCNT);
    uint16_t width = dq->dq_width;
    if (width > 1 && width != DISPATCH_QUEUE_WIDTH_MAX) width /= 2;
    dispatch_introspection_queue_s diq = {
        .queue = dq,
        .target_queue = dq->do_targetq,
        .label = dq->dq_label,
        .serialnum = dq->dq_serialnum,
        .width = width,
        .suspend_count = dq->do_suspend_cnt / 2,
        .enqueued = (dq->do_suspend_cnt & 1) && !global,
        .barrier = (dq->dq_running & 1) && !global,
        .draining = (dq->dq_items_head == (void*)~0ul) ||
                (!dq->dq_items_head && dq->dq_items_tail),
        .global = global,
        .main = (dq == &_dispatch_main_q),
    };
    return diq;
}

static inline dispatch_introspection_source_s
_dispatch_introspection_source_get_info(dispatch_source_t ds)
{
    dispatch_source_refs_t dr = ds->ds_refs;
    dispatch_continuation_t dc = dr->ds_handler[DS_EVENT_HANDLER];
    void *ctxt = NULL;
    dispatch_function_t handler = NULL;
    bool hdlr_is_block = false;
    if (dc) {
        ctxt = dc->dc_ctxt;
        handler = dc->dc_func;
        hdlr_is_block = ((long)dc->do_vtable & DISPATCH_OBJ_BLOCK_RELEASE_BIT);
    }
    bool after = (handler == _dispatch_after_timer_callback);
    if (after && !(ds->ds_atomic_flags & DSF_CANCELED)) {
        dc = ctxt;
        ctxt = dc->dc_ctxt;
        handler = dc->dc_func;
        hdlr_is_block = (handler == _dispatch_call_block_and_release);
        if (hdlr_is_block) {
            handler = _dispatch_Block_invoke(ctxt);
        }
    }
    dispatch_introspection_source_s dis = {
        .source = ds,
        .target_queue = ds->do_targetq,
        .type = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.filter : 0,
        .handle = ds->ds_dkev ? (unsigned long)ds->ds_dkev->dk_kevent.ident : 0,
        .context = ctxt,
        .handler = handler,
        .suspend_count = ds->do_suspend_cnt / 2,
        .enqueued = (ds->do_suspend_cnt & 1),
        .handler_is_block = hdlr_is_block,
        .timer = ds->ds_is_timer,
        .after = after,
    };
    return dis;
}

static inline dispatch_introspection_queue_thread_s
_dispatch_introspection_thread_get_info(dispatch_introspection_thread_t dit)
{
    dispatch_introspection_queue_thread_s diqt = {
        .object = (void*)dit,
        .thread = dit->thread,
    };
    if (dit->queue && *dit->queue) {
        diqt.queue = dispatch_introspection_queue_get_info(*dit->queue);
    }
    return diqt;
}

DISPATCH_USED inline dispatch_introspection_queue_item_s
dispatch_introspection_queue_item_get_info(dispatch_queue_t dq,
        dispatch_continuation_t dc)
{
    dispatch_introspection_queue_item_s diqi;
    if (DISPATCH_OBJ_IS_VTABLE(dc)) {
        dispatch_object_t dou = (dispatch_object_t)dc;
        unsigned long type = dx_type(dou._do);
        unsigned long metatype = type & _DISPATCH_META_TYPE_MASK;
        if (metatype == _DISPATCH_QUEUE_TYPE &&
                type != DISPATCH_QUEUE_SPECIFIC_TYPE) {
            diqi.type = dispatch_introspection_queue_item_type_queue;
            diqi.queue = dispatch_introspection_queue_get_info(dou._dq);
        } else if (metatype == _DISPATCH_SOURCE_TYPE &&
                type != DISPATCH_MACH_CHANNEL_TYPE) {
            diqi.type = dispatch_introspection_queue_item_type_source;
            diqi.source = _dispatch_introspection_source_get_info(dou._ds);
        } else {
            diqi.type = dispatch_introspection_queue_item_type_object;
            diqi.object = _dispatch_introspection_object_get_info(dou._do);
        }
    } else {
        diqi.function = _dispatch_introspection_continuation_get_info(dq, dc,
                &diqi.type);
    }
    return diqi;
}
int
mono_sgen_thread_handshake (int signum)
{
    task_t task = current_task ();
    thread_port_t cur_thread = mach_thread_self ();
    thread_act_array_t thread_list;
    mach_msg_type_number_t num_threads;
    mach_msg_type_number_t num_state;
    thread_state_t state;
    kern_return_t ret;
    ucontext_t ctx;
    mcontext_t mctx;
    pthread_t exception_thread = mono_gc_get_mach_exception_thread ();

    SgenThreadInfo *info;
    gpointer regs [ARCH_NUM_REGS];
    gpointer stack_start;

    int count, i;

    mono_mach_get_threads (&thread_list, &num_threads);

    for (i = 0, count = 0; i < num_threads; i++) {
        thread_port_t t = thread_list [i];
        pthread_t pt = pthread_from_mach_thread_np (t);
        if (t != cur_thread && pt != exception_thread && !mono_sgen_is_worker_thread (pt)) {
            if (signum == suspend_signal_num) {
                ret = thread_suspend (t);
                if (ret != KERN_SUCCESS) {
                    mach_port_deallocate (task, t);
                    continue;
                }

                state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
                ret = mono_mach_arch_get_thread_state (t, state, &num_state);
                if (ret != KERN_SUCCESS) {
                    mach_port_deallocate (task, t);
                    continue;
                }

                info = mono_sgen_thread_info_lookup (pt);

                /* Ensure that the runtime is aware of this thread */
                if (info != NULL) {
                    mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
                    mono_mach_arch_thread_state_to_mcontext (state, mctx);
                    ctx.uc_mcontext = mctx;

                    info->stopped_domain = mono_mach_arch_get_tls_value_from_thread (t,
                        mono_pthread_key_for_tls (mono_domain_get_tls_key ()));
                    info->stopped_ip = (gpointer) mono_mach_arch_get_ip (state);
                    stack_start = (char*) mono_mach_arch_get_sp (state) - REDZONE_SIZE;
                    /* If stack_start is not within the limits, then don't set it in info and we will be restarted. */
                    if (stack_start >= info->stack_start_limit && stack_start <= info->stack_end) {
                        info->stack_start = stack_start;

                        ARCH_COPY_SIGCTX_REGS (regs, &ctx);
                        info->stopped_regs = regs;
                    } else {
                        g_assert (!info->stack_start);
                    }

                    /* Notify the JIT */
                    if (mono_gc_get_gc_callbacks ()->thread_suspend_func)
                        mono_gc_get_gc_callbacks ()->thread_suspend_func (info->runtime_data, &ctx);
                }
            } else {
                ret = thread_resume (t);
                if (ret != KERN_SUCCESS) {
                    mach_port_deallocate (task, t);
                    continue;
                }
            }
            count++;

            mach_port_deallocate (task, t);
        }
    }

    mach_port_deallocate (task, cur_thread);

    return count;
}