/*
 * thread_dispatchqaddr:
 *
 * Return the user-space address where this thread's libdispatch
 * queue pointer lives (cthread handle plus the per-process dispatch
 * queue offset), or 0 when the thread is null or its task has no
 * BSD process info.
 */
uint64_t
thread_dispatchqaddr(
	thread_t		thread)
{
	uint64_t	qaddr = 0;

	if (thread == THREAD_NULL)
		return (0);

	if (thread->task->bsd_info != NULL) {
		uint64_t	handle = thread->machine.cthread_self;

		qaddr = handle +
		    get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	}

	return (qaddr);
}
/*
 * thread_dispatchqaddr:
 *
 * Return the user-space address of this thread's libdispatch queue
 * pointer: the thread's cthread handle plus the dispatch queue
 * offset recorded in the owning BSD process.  Returns 0 for a null
 * thread or a task without BSD process info.
 */
uint64_t
thread_dispatchqaddr(
	thread_t		thread)
{
	uint64_t	dispatchqueue_addr = 0;

	if (thread != THREAD_NULL) {
		uint64_t	thread_handle;

		/* location of the cthread self value differs per machine layout */
#if defined(__ppc__) || defined(__arm__)
		thread_handle = thread->machine.cthread_self;
#else
		thread_handle = thread->machine.pcb->cthread_self;
#endif

		if (thread->task->bsd_info != NULL)
			dispatchqueue_addr = thread_handle +
			    get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
	}

	return (dispatchqueue_addr);
}
/*
 * thread_info_internal:
 *
 * Return information about the specified thread for the requested
 * flavor.  On entry *thread_info_count holds the size (in natural
 * units) of the caller's out array; on success it is set to the
 * count actually used.  Returns KERN_INVALID_ARGUMENT for a null
 * thread, an unknown flavor, or an undersized out array, and
 * KERN_INVALID_POLICY when the flavor does not match the thread's
 * scheduling mode.
 */
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int						state, flags;
	spl_t					s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		/* all reads below are made under the thread lock at splsched */
		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread,
			&basic_info->user_time,
			&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = 0;
#if defined(CONFIG_SCHED_TRADITIONAL)
		if (sched_tick_interval) {
			basic_info->cpu_usage =	(integer_t)(((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) /	sched_tick_interval);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		}
#endif

		/* clamp to the full-scale usage value */
		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		/* a processor's bound idle thread is reported as idle */
		if (thread->bound_processor != PROCESSOR_NULL &&
			thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		/* no kernel stack means the thread has been swapped out */
		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		/*
		 * Collapse the internal state bits into a single Mach run
		 * state, checked in decreasing order of precedence.
		 */
		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_IDENTIFIER_INFO) {
		register thread_identifier_info_t	identifier_info;

		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		identifier_info = (thread_identifier_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		identifier_info->thread_id = thread->thread_id;
		identifier_info->thread_handle = thread->machine.cthread_self;
		if(thread->task->bsd_info) {
			identifier_info->dispatch_qaddr =  identifier_info->thread_handle + get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
		} else {
			/* no BSD proc info: cannot compute dispatch_qaddr; fail the call */
			thread_unlock(thread);
			splx(s);
			return KERN_INVALID_ARGUMENT;
		}

		thread_unlock(thread);
		splx(s);
		return KERN_SUCCESS;
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* only meaningful for timeshare threads */
		if (thread->sched_mode != TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (ts_info->depressed) {
			/* while depressed, report the depress priority and the saved base */
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority =	thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* FIFO scheduling info is not supported */
		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* timeshare threads have no round-robin info */
		if (thread->sched_mode == TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (rr_info->depressed) {
			/* while depressed, report the depress priority and the saved base */
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		/* report the default initial quantum, converted to milliseconds */
		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}