/*
 *	thread_info_internal:
 *
 *	Return scheduling/state information about a thread in the
 *	requested flavor.  thread_info_out points to an array of at
 *	least *thread_info_count integers; on success *thread_info_count
 *	is updated to the number of integers actually filled in.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null thread, an unknown
 *	flavor, or a too-small count; KERN_INVALID_POLICY when the
 *	flavor does not match the thread's scheduling mode.
 */
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int		state, flags;
	spl_t	s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread,	&basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (thread->sched_stamp != sched_tick)
			update_priority(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 *
		 *	Guard against sched_tick_interval being zero (e.g. very
		 *	early in boot, before the scheduler timebase is set up);
		 *	dividing by zero here would panic the kernel.  The
		 *	division is done in 64 bits to avoid overflow, then
		 *	narrowed explicitly.
		 */
		if (sched_tick_interval) {
			basic_info->cpu_usage = (integer_t)(((uint64_t)thread->cpu_usage *
											TH_USAGE_SCALE) / sched_tick_interval);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		}
		else
			basic_info->cpu_usage = 0;

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode & TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		/* idle flag: thread is its bound processor's dedicated idle thread */
		if (thread->bound_processor != PROCESSOR_NULL &&
				thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		/* map internal state bits to a single externally-visible run state */
		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* flavor only valid for timesharing threads */
		if (!(thread->sched_mode & TH_MODE_TIMESHARE)) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* FIFO scheduling is not supported */
		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* flavor only valid for non-timesharing (round-robin) threads */
		if (thread->sched_mode & TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_mode & TH_MODE_ISDEPRESSED) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		rr_info->max_priority = thread->max_priority;
		/* quantum reported in milliseconds */
		rr_info->quantum = std_quantum_us / 1000;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
/*
 *	thread_info_internal:
 *
 *	Return scheduling/state/identity information about a thread in
 *	the requested flavor.  thread_info_out points to an array of at
 *	least *thread_info_count integers; on success *thread_info_count
 *	is updated to the number of integers actually filled in.
 *
 *	Returns KERN_INVALID_ARGUMENT for a null thread, an unknown
 *	flavor, a too-small count, or (for THREAD_IDENTIFIER_INFO) a
 *	task with no BSD process; KERN_INVALID_POLICY when the flavor
 *	does not match the thread's scheduling mode.
 */
kern_return_t
thread_info_internal(
	register thread_t		thread,
	thread_flavor_t			flavor,
	thread_info_t			thread_info_out,	/* ptr to OUT array */
	mach_msg_type_number_t	*thread_info_count)	/*IN/OUT*/
{
	int		state, flags;
	spl_t	s;

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (flavor == THREAD_BASIC_INFO) {
		register thread_basic_info_t	basic_info;

		if (*thread_info_count < THREAD_BASIC_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		basic_info = (thread_basic_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* fill in info */

		thread_read_times(thread,	&basic_info->user_time,
									&basic_info->system_time);

		/*
		 *	Update lazy-evaluated scheduler info because someone wants it.
		 */
		if (SCHED(can_update_priority)(thread))
			SCHED(update_priority)(thread);

		basic_info->sleep_time = 0;

		/*
		 *	To calculate cpu_usage, first correct for timer rate,
		 *	then for 5/8 ageing.  The correction factor [3/5] is
		 *	(1/(5/8) - 1).
		 */
		basic_info->cpu_usage = 0;
#if defined(CONFIG_SCHED_TRADITIONAL)
		if (sched_tick_interval) {
			basic_info->cpu_usage =	(integer_t)(((uint64_t)thread->cpu_usage
										* TH_USAGE_SCALE) /	sched_tick_interval);
			basic_info->cpu_usage = (basic_info->cpu_usage * 3) / 5;
		}
#endif

		if (basic_info->cpu_usage > TH_USAGE_SCALE)
			basic_info->cpu_usage = TH_USAGE_SCALE;

		basic_info->policy = ((thread->sched_mode == TH_MODE_TIMESHARE)?
												POLICY_TIMESHARE: POLICY_RR);

		flags = 0;
		/* idle flag: thread is its bound processor's dedicated idle thread */
		if (thread->bound_processor != PROCESSOR_NULL &&
				thread->bound_processor->idle_thread == thread)
			flags |= TH_FLAGS_IDLE;

		if (!thread->kernel_stack)
			flags |= TH_FLAGS_SWAPPED;

		/* map internal state bits to a single externally-visible run state */
		state = 0;
		if (thread->state & TH_TERMINATE)
			state = TH_STATE_HALTED;
		else
		if (thread->state & TH_RUN)
			state = TH_STATE_RUNNING;
		else
		if (thread->state & TH_UNINT)
			state = TH_STATE_UNINTERRUPTIBLE;
		else
		if (thread->state & TH_SUSP)
			state = TH_STATE_STOPPED;
		else
		if (thread->state & TH_WAIT)
			state = TH_STATE_WAITING;

		basic_info->run_state = state;
		basic_info->flags = flags;

		basic_info->suspend_count = thread->user_stop_count;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = THREAD_BASIC_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_IDENTIFIER_INFO) {
		register thread_identifier_info_t	identifier_info;

		if (*thread_info_count < THREAD_IDENTIFIER_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		identifier_info = (thread_identifier_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		identifier_info->thread_id = thread->thread_id;
		identifier_info->thread_handle = thread->machine.cthread_self;
		if (thread->task->bsd_info) {
			identifier_info->dispatch_qaddr =  identifier_info->thread_handle +
					get_dispatchqueue_offset_from_proc(thread->task->bsd_info);
		} else {
			/* no BSD process: cannot compute the dispatch queue address */
			thread_unlock(thread);
			splx(s);
			return KERN_INVALID_ARGUMENT;
		}

		thread_unlock(thread);
		splx(s);

		/*
		 *	Report how many integers were filled in, as every other
		 *	flavor does -- the count is an IN/OUT parameter.
		 */
		*thread_info_count = THREAD_IDENTIFIER_INFO_COUNT;

		return KERN_SUCCESS;
	}
	else
	if (flavor == THREAD_SCHED_TIMESHARE_INFO) {
		policy_timeshare_info_t		ts_info;

		if (*thread_info_count < POLICY_TIMESHARE_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		ts_info = (policy_timeshare_info_t)thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* flavor only valid for timesharing threads */
		if (thread->sched_mode != TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		ts_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (ts_info->depressed) {
			ts_info->base_priority = DEPRESSPRI;
			ts_info->depress_priority = thread->priority;
		}
		else {
			ts_info->base_priority = thread->priority;
			ts_info->depress_priority = -1;
		}

		ts_info->cur_priority = thread->sched_pri;
		ts_info->max_priority = thread->max_priority;

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_TIMESHARE_INFO_COUNT;

		return (KERN_SUCCESS);
	}
	else
	if (flavor == THREAD_SCHED_FIFO_INFO) {
		if (*thread_info_count < POLICY_FIFO_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		/* FIFO scheduling is not supported */
		return (KERN_INVALID_POLICY);
	}
	else
	if (flavor == THREAD_SCHED_RR_INFO) {
		policy_rr_info_t			rr_info;
		uint32_t quantum_time;
		uint64_t quantum_ns;

		if (*thread_info_count < POLICY_RR_INFO_COUNT)
			return (KERN_INVALID_ARGUMENT);

		rr_info = (policy_rr_info_t) thread_info_out;

		s = splsched();
		thread_lock(thread);

		/* flavor only valid for non-timesharing (round-robin) threads */
		if (thread->sched_mode == TH_MODE_TIMESHARE) {
			thread_unlock(thread);
			splx(s);

			return (KERN_INVALID_POLICY);
		}

		rr_info->depressed = (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) != 0;
		if (rr_info->depressed) {
			rr_info->base_priority = DEPRESSPRI;
			rr_info->depress_priority = thread->priority;
		}
		else {
			rr_info->base_priority = thread->priority;
			rr_info->depress_priority = -1;
		}

		/* convert the scheduler's quantum (abstime units) to milliseconds */
		quantum_time = SCHED(initial_quantum_size)(THREAD_NULL);
		absolutetime_to_nanoseconds(quantum_time, &quantum_ns);

		rr_info->max_priority = thread->max_priority;
		rr_info->quantum = (uint32_t)(quantum_ns / 1000 / 1000);

		thread_unlock(thread);
		splx(s);

		*thread_info_count = POLICY_RR_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	return (KERN_INVALID_ARGUMENT);
}
/*
 *	act_deallocate:
 *
 *	Drop one reference on a thread activation.  When the last
 *	reference is released, tear the activation down: fold its CPU
 *	times into the task totals, unlink it from the task's thread
 *	list, release its IPC/profiling state and BSD uthread, then
 *	drop the references it held on its task and shuttle thread.
 *
 *	NOTE(review): the teardown sequence below is order-sensitive
 *	(act lock dropped before the task lock is taken; act fields are
 *	still read after act_unlock, presumably safe because this was
 *	the last reference -- confirm against the locking model).
 */
void
act_deallocate(
	thread_act_t	act)
{
	task_t		task;
	thread_t	thread;
	void		*task_proc;

	if (act == NULL)
		return;

	act_lock(act);

	/* not the last reference: just drop it and return */
	if (--act->act_ref_count > 0) {
		act_unlock(act);
		return;
	}

	/* last reference: the activation must already be inactive */
	assert(!act->active);

	thread = act->thread;
	assert(thread != NULL);

	/* detach the activation from its shuttle thread */
	thread->top_act = NULL;

	act_unlock(act);

	task = act->task;
	task_lock(task);

	task_proc = task->bsd_info;

	{
		time_value_t	user_time, system_time;

		/* roll this activation's CPU times into the task totals */
		thread_read_times(thread, &user_time, &system_time);
		time_value_add(&task->total_user_time,   &user_time);
		time_value_add(&task->total_system_time, &system_time);

		/* unlink from the task's thread list and adjust the counts */
		queue_remove(&task->threads, act, thread_act_t, task_threads);
		act->task_threads.next = NULL;
		task->thread_count--;
		task->res_thread_count--;
	}

	task_unlock(task);

	/* release profiling and IPC state tied to this activation */
	act_prof_deallocate(act);
	ipc_thr_act_terminate(act);

#ifdef MACH_BSD
	{
		extern void uthread_free(task_t, void *, void *, void *);
		void *ut = act->uthread;

		/* free the BSD uthread; task_proc was captured under the task lock */
		uthread_free(task, act, ut, task_proc);
		act->uthread = NULL;
	}
#endif /* MACH_BSD */

	/* drop the references the activation held */
	task_deallocate(task);

	thread_deallocate(thread);
}