void
thread_read_times(
	thread_t		thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	clock_sec_t		secs;
	clock_usec_t	usecs;
	uint64_t		tval_user, tval_system;

	tval_user = timer_grab(&thread->user_timer);
	tval_system = timer_grab(&thread->system_timer);

	if (thread->precise_user_kernel_time) {
		absolutetime_to_microtime(tval_user, &secs, &usecs);
		user_time->seconds = (typeof(user_time->seconds))secs;
		user_time->microseconds = usecs;

		absolutetime_to_microtime(tval_system, &secs, &usecs);
		system_time->seconds = (typeof(system_time->seconds))secs;
		system_time->microseconds = usecs;
	} else {
		/* system_timer may represent either sys or user */
		tval_user += tval_system;
		absolutetime_to_microtime(tval_user, &secs, &usecs);
		user_time->seconds = (typeof(user_time->seconds))secs;
		user_time->microseconds = usecs;

		system_time->seconds = 0;
		system_time->microseconds = 0;
	}
}
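/*
 * Editorial sketch (not XNU source): why timer_grab() can read another
 * thread's timers without a lock.  On 32-bit hardware a 64-bit
 * accumulator cannot be read atomically, so the timer keeps a duplicate
 * of its high word; the writer publishes the duplicate first and the
 * real high word last, and a reader retries until both copies agree.
 * The struct and field names below are assumptions modeled on the Mach
 * timer layout, not the actual definitions.
 */
#include <stdint.h>

struct sketch_timer {
	volatile uint32_t	low_bits;
	volatile uint32_t	high_bits;
	volatile uint32_t	high_bits_check;	/* duplicate of high_bits */
};

static void
sketch_timer_update(struct sketch_timer *timer, uint64_t value)
{
	/* publish the duplicate high word first, the real one last */
	timer->high_bits_check = (uint32_t)(value >> 32);
	timer->low_bits = (uint32_t)value;
	timer->high_bits = (uint32_t)(value >> 32);
}

static uint64_t
sketch_timer_grab(struct sketch_timer *timer)
{
	uint32_t	high, low;

	do {
		high = timer->high_bits;
		low = timer->low_bits;
	} while (high != timer->high_bits_check);	/* retry on a torn read */

	return ((uint64_t)high << 32) | low;
}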
/*
 *	thread_read_times reads the user and system times from a thread.
 *	Time accumulated since last timestamp is not included.  Should
 *	be called at splsched() to avoid having user and system times
 *	be out of step.  Doesn't care if caller locked thread.
 *
 *	Needs to be kept coherent with thread_read_times ahead.
 */
void
thread_read_times(
	thread_t	thread,
	time_value_t	*user_time_p,
	time_value_t	*system_time_p)
{
	timer_save_data_t	temp;
	register timer_t	timer;

	timer = &thread->user_timer;
	timer_grab(timer, &temp);

#ifdef	TIMER_ADJUST
	TIMER_ADJUST(&temp);
#endif	/* TIMER_ADJUST */
	user_time_p->seconds = temp.high + temp.low / 1000000;
	user_time_p->microseconds = temp.low % 1000000;

	timer = &thread->system_timer;
	timer_grab(timer, &temp);

#ifdef	TIMER_ADJUST
	TIMER_ADJUST(&temp);
#endif	/* TIMER_ADJUST */
	system_time_p->seconds = temp.high + temp.low / 1000000;
	system_time_p->microseconds = temp.low % 1000000;
}
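/*
 * Editorial note: in this older interface the timer accumulates seconds
 * in temp.high and microseconds in temp.low, and the low word may hold
 * more than one second's worth of microseconds when TIMER_ADJUST is not
 * configured.  The divide/modulo pair above renormalizes it; e.g. a
 * reading of temp.high = 5, temp.low = 2300000 yields seconds = 5 + 2 = 7
 * and microseconds = 300000.
 */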
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	thread;
	task_t		task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/*
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
int64_t
dtrace_calc_thread_recent_vtime(thread_t thread)
{
	if (thread != THREAD_NULL) {
		processor_t	processor = current_processor();
		uint64_t	abstime = mach_absolute_time();
		timer_t		timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
}
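/*
 * Editorial sketch (hypothetical usage, not XNU source): because the
 * return value adds the live interval (abstime - timer->tstamp) on top
 * of the two grabbed timers, bracketing a stretch of work with two
 * calls yields the CPU time the thread consumed in between, in
 * mach_absolute_time() units.  `thread` stands for a thread_t that is
 * current on this processor, as in DTrace probe context.
 */
int64_t vtime_before = dtrace_calc_thread_recent_vtime(thread);
/* ... traced work runs ... */
int64_t vtime_after = dtrace_calc_thread_recent_vtime(thread);
int64_t cpu_used_abs = vtime_after - vtime_before;	/* abstime units */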
void
thread_read_times(
	thread_t		thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	absolutetime_to_microtime(timer_grab(&thread->user_timer),
			(unsigned *)&user_time->seconds, (unsigned *)&user_time->microseconds);

	absolutetime_to_microtime(timer_grab(&thread->system_timer),
			(unsigned *)&system_time->seconds, (unsigned *)&system_time->microseconds);
}
void
thread_read_times(
	thread_t		thread,
	time_value_t	*user_time,
	time_value_t	*system_time)
{
	clock_sec_t		secs;
	clock_usec_t	usecs;

	absolutetime_to_microtime(timer_grab(&thread->user_timer), &secs, &usecs);
	user_time->seconds = (typeof(user_time->seconds))secs;
	user_time->microseconds = usecs;

	absolutetime_to_microtime(timer_grab(&thread->system_timer), &secs, &usecs);
	system_time->seconds = (typeof(system_time->seconds))secs;
	system_time->microseconds = usecs;
}
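/*
 * Editorial sketch (not the kernel's implementation): the shape of the
 * conversion thread_read_times depends on.  Assuming the absolute-time
 * value has already been scaled to nanoseconds (true on some timebases,
 * not all), splitting into seconds and microseconds is a divide and a
 * modulo.  Names and types here are illustrative only.
 */
#include <stdint.h>

#define SKETCH_NSEC_PER_SEC	1000000000ULL
#define SKETCH_NSEC_PER_USEC	1000ULL

static void
sketch_abs_to_microtime(uint64_t abs_ns, uint64_t *secs, uint32_t *usecs)
{
	*secs = abs_ns / SKETCH_NSEC_PER_SEC;
	*usecs = (uint32_t)((abs_ns % SKETCH_NSEC_PER_SEC) / SKETCH_NSEC_PER_USEC);
}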
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	thread;
	task_t		task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		mutex_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		mutex_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
unsigned
timer_delta(
	register timer_t	timer,
	timer_save_t		save)
{
	timer_save_data_t	new_save;
	register unsigned	result;

	timer_grab(timer, &new_save);
	result = (new_save.high - save->high) * TIMER_HIGH_UNIT +
		new_save.low - save->low;
	save->high = new_save.high;
	save->low = new_save.low;

	return (result);
}
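/*
 * Editorial note: timer_delta() folds the high/low pair into a single
 * unsigned count of elapsed low-word units since the saved snapshot.
 * With a hypothetical TIMER_HIGH_UNIT of 1000000, save = {high 2,
 * low 900000} and a fresh reading of {high 3, low 100000} give
 * (3 - 2) * 1000000 + 100000 - 900000 = 200000.  Unsigned arithmetic
 * makes the low-word borrow come out correctly even though
 * 100000 - 900000 underflows on its own.
 */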
void
timer_read(
	timer_t	timer,
	register time_value_t	*tv)
{
	timer_save_data_t	temp;

	timer_grab(timer, &temp);

	/*
	 *	Normalize the result
	 */
#ifdef	TIMER_ADJUST
	TIMER_ADJUST(&temp);
#endif	/* TIMER_ADJUST */
	tv->seconds = temp.high + temp.low / 1000000;
	tv->microseconds = temp.low % 1000000;
}
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	self, thread;
	task_t		task;

	self = current_thread();
	self->options |= TH_OPT_SYSTEM_CRITICAL;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		if (thread->precise_user_kernel_time) {
			task->total_system_time += timer_grab(&thread->system_timer);
		} else {
			task->total_user_time += timer_grab(&thread->system_timer);
		}

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		task->syscalls_unix += thread->syscalls_unix;
		task->syscalls_mach += thread->syscalls_mach;

		task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
		task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/*
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	self->options &= ~TH_OPT_SYSTEM_CRITICAL;

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
kern_return_t
processor_info(
	register processor_t	processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	register int	cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		register processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		register processor_cpu_load_info_t	cpu_load_info;

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
			(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, idle_state)) / hz_tick_interval);
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
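/*
 * Editorial sketch (assumed semantics): cpu_ticks[] is reported in
 * scheduler ticks, so each accumulated absolute-time total is divided
 * by hz_tick_interval, the length of one tick in absolute-time units.
 * The constant below is hypothetical: a 100 Hz tick over a nanosecond
 * timebase.
 */
#include <stdint.h>

#define SKETCH_HZ_TICK_INTERVAL	10000000ULL	/* 10 ms tick, ns timebase */

static uint32_t
sketch_abs_to_ticks(uint64_t accumulated_abs)
{
	/* e.g. 2500000000 (2.5 s of accumulated time) -> 250 ticks */
	return (uint32_t)(accumulated_abs / SKETCH_HZ_TICK_INTERVAL);
}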
kern_return_t
host_statistics(
	host_t			host,
	host_flavor_t		flavor,
	host_info_t		info,
	mach_msg_type_number_t	*count)
{
	uint32_t	i;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	switch(flavor) {

	case HOST_LOAD_INFO:
	{
		host_load_info_t	load_info;

		if (*count < HOST_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		load_info = (host_load_info_t) info;

		bcopy((char *) avenrun,
			  (char *) load_info->avenrun, sizeof avenrun);
		bcopy((char *) mach_factor,
			  (char *) load_info->mach_factor, sizeof mach_factor);

		*count = HOST_LOAD_INFO_COUNT;
		return (KERN_SUCCESS);
	}

	case HOST_VM_INFO:
	{
		register processor_t		processor;
		register vm_statistics64_t	stat;
		vm_statistics64_data_t		host_vm_stat;
		vm_statistics_t			stat32;
		mach_msg_type_number_t		original_count;

		if (*count < HOST_VM_INFO_REV0_COUNT)
			return (KERN_FAILURE);

		processor = processor_list;
		stat = &PROCESSOR_DATA(processor, vm_stat);
		host_vm_stat = *stat;

		if (processor_count > 1) {
			simple_lock(&processor_list_lock);

			while ((processor = processor->processor_list) != NULL) {
				stat = &PROCESSOR_DATA(processor, vm_stat);

				host_vm_stat.zero_fill_count += stat->zero_fill_count;
				host_vm_stat.reactivations += stat->reactivations;
				host_vm_stat.pageins += stat->pageins;
				host_vm_stat.pageouts += stat->pageouts;
				host_vm_stat.faults += stat->faults;
				host_vm_stat.cow_faults += stat->cow_faults;
				host_vm_stat.lookups += stat->lookups;
				host_vm_stat.hits += stat->hits;
			}

			simple_unlock(&processor_list_lock);
		}

		stat32 = (vm_statistics_t) info;

		stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
		stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

		if (vm_page_local_q) {
			for (i = 0; i < vm_page_local_q_count; i++) {
				struct vpl	*lq;

				lq = &vm_page_local_q[i].vpl_un.vpl;

				stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
			}
		}
		stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
		stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
		stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
		stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
		stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
		stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
		stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
		stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
		stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
		stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

		/*
		 * Fill in extra info added in later revisions of the
		 * vm_statistics data structure.  Fill in only what can fit
		 * in the data structure the caller gave us !
		 */
		original_count = *count;
		*count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
		if (original_count >= HOST_VM_INFO_REV1_COUNT) {
			/* rev1 added "purgeable" info */
			stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
			stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
			*count = HOST_VM_INFO_REV1_COUNT;
		}

		if (original_count >= HOST_VM_INFO_REV2_COUNT) {
			/* rev2 added "speculative" info */
			stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
			*count = HOST_VM_INFO_REV2_COUNT;
		}

		/* rev3 changed some of the fields to be 64-bit */

		return (KERN_SUCCESS);
	}

	case HOST_CPU_LOAD_INFO:
	{
		register processor_t	processor;
		host_cpu_load_info_t	cpu_load_info;

		if (*count < HOST_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

#define GET_TICKS_VALUE(processor, state, timer)			\
MACRO_BEGIN								\
	cpu_load_info->cpu_ticks[(state)] +=				\
		(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer)) \
				/ hz_tick_interval);			\
MACRO_END

		cpu_load_info = (host_cpu_load_info_t)info;
		cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		simple_lock(&processor_list_lock);

		for (processor = processor_list; processor != NULL; processor = processor->processor_list) {
			timer_data_t	idle_temp;
			timer_t		idle_state;

			GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
			if (precise_user_kernel_time) {
				GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
			} else {
				/* system_state may represent either sys or user */
				GET_TICKS_VALUE(processor, CPU_STATE_USER, system_state);
			}

			idle_state = &PROCESSOR_DATA(processor, idle_state);
			idle_temp = *idle_state;

			if (PROCESSOR_DATA(processor, current_state) != idle_state ||
				timer_grab(&idle_temp) != timer_grab(idle_state))
				GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
			else {
				timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

				cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
					(uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
			}
		}
		simple_unlock(&processor_list_lock);

		*count = HOST_CPU_LOAD_INFO_COUNT;

		return (KERN_SUCCESS);
	}

	case HOST_EXPIRED_TASK_INFO:
	{
		if (*count < TASK_POWER_INFO_COUNT) {
			return (KERN_FAILURE);
		}

		task_power_info_t tinfo = (task_power_info_t)info;

		tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
		tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

		tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
		tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

		tinfo->total_user = dead_task_statistics.total_user_time;
		tinfo->total_system = dead_task_statistics.total_system_time;

		return (KERN_SUCCESS);
	}

	default:
		return (KERN_INVALID_ARGUMENT);
	}
}
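/*
 * Editorial note: when precise_user_kernel_time is off, the kernel does
 * not reliably switch timers at the user/kernel boundary, so a thread's
 * or processor's system timer may hold either kind of time (the source
 * comments say as much: "system_state may represent either sys or
 * user").  Every consumer in this section therefore folds the system
 * total into the user bucket and reports zero system time, rather than
 * reporting a misleading split.
 */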
kern_return_t
processor_info(
	processor_t		processor,
	processor_flavor_t	flavor,
	host_t			*host,
	processor_info_t	info,
	mach_msg_type_number_t	*count)
{
	int		cpu_id, state;
	kern_return_t	result;

	if (processor == PROCESSOR_NULL)
		return (KERN_INVALID_ARGUMENT);

	cpu_id = processor->cpu_id;

	switch (flavor) {

	case PROCESSOR_BASIC_INFO:
	{
		processor_basic_info_t	basic_info;

		if (*count < PROCESSOR_BASIC_INFO_COUNT)
			return (KERN_FAILURE);

		basic_info = (processor_basic_info_t) info;
		basic_info->cpu_type = slot_type(cpu_id);
		basic_info->cpu_subtype = slot_subtype(cpu_id);
		state = processor->state;
		if (state == PROCESSOR_OFF_LINE)
			basic_info->running = FALSE;
		else
			basic_info->running = TRUE;
		basic_info->slot_num = cpu_id;
		if (processor == master_processor)
			basic_info->is_master = TRUE;
		else
			basic_info->is_master = FALSE;

		*count = PROCESSOR_BASIC_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	case PROCESSOR_CPU_LOAD_INFO:
	{
		processor_cpu_load_info_t	cpu_load_info;
		timer_t		idle_state;
		uint64_t	idle_time_snapshot1, idle_time_snapshot2;
		uint64_t	idle_time_tstamp1, idle_time_tstamp2;

		/*
		 * We capture the accumulated idle time twice over
		 * the course of this function, as well as the timestamps
		 * when each were last updated. Since these are
		 * all done using non-atomic racy mechanisms, the
		 * most we can infer is whether values are stable.
		 * timer_grab() is the only function that can be
		 * used reliably on another processor's per-processor
		 * data.
		 */

		if (*count < PROCESSOR_CPU_LOAD_INFO_COUNT)
			return (KERN_FAILURE);

		cpu_load_info = (processor_cpu_load_info_t) info;
		if (precise_user_kernel_time) {
			cpu_load_info->cpu_ticks[CPU_STATE_USER] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, user_state)) / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] =
				(uint32_t)(timer_grab(&PROCESSOR_DATA(processor, system_state)) / hz_tick_interval);
		} else {
			uint64_t tval = timer_grab(&PROCESSOR_DATA(processor, user_state)) +
				timer_grab(&PROCESSOR_DATA(processor, system_state));

			cpu_load_info->cpu_ticks[CPU_STATE_USER] = (uint32_t)(tval / hz_tick_interval);
			cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
		}

		idle_state = &PROCESSOR_DATA(processor, idle_state);
		idle_time_snapshot1 = timer_grab(idle_state);
		idle_time_tstamp1 = idle_state->tstamp;

		/*
		 * Idle processors are not continually updating their
		 * per-processor idle timer, so it may be extremely
		 * out of date, resulting in an over-representation
		 * of non-idle time between two measurement
		 * intervals by e.g. top(1). If we are non-idle, or
		 * have evidence that the timer is being updated
		 * concurrently, we consider its value up-to-date.
		 */
		if (PROCESSOR_DATA(processor, current_state) != idle_state) {
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		} else if ((idle_time_snapshot1 != (idle_time_snapshot2 = timer_grab(idle_state))) ||
				   (idle_time_tstamp1 != (idle_time_tstamp2 = idle_state->tstamp))) {
			/* Idle timer is being updated concurrently, second stamp is good enough */
			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot2 / hz_tick_interval);
		} else {
			/*
			 * Idle timer may be very stale. Fortunately we have established
			 * that idle_time_snapshot1 and idle_time_tstamp1 are unchanging
			 */
			idle_time_snapshot1 += mach_absolute_time() - idle_time_tstamp1;

			cpu_load_info->cpu_ticks[CPU_STATE_IDLE] =
				(uint32_t)(idle_time_snapshot1 / hz_tick_interval);
		}

		cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

		*count = PROCESSOR_CPU_LOAD_INFO_COUNT;
		*host = &realhost;

		return (KERN_SUCCESS);
	}

	default:
		result = cpu_info(flavor, cpu_id, info, count);
		if (result == KERN_SUCCESS)
			*host = &realhost;

		return (result);
	}
}
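/*
 * Editorial sketch (not XNU source) distilling the staleness strategy
 * above: snapshot the idle timer twice; if either the accumulated value
 * or its timestamp moved, a concurrent update is in progress and the
 * second snapshot is fresh enough; otherwise the timer is stable and
 * possibly stale, so credit the interval since its last timestamp.
 * sketch_timer_grab() and its field names are the assumptions carried
 * over from the earlier sketch; the tstamp pointer and `now` callback
 * are illustrative stand-ins for the per-processor timer state.
 */
static uint64_t
sketch_read_idle_time(struct sketch_timer *idle, volatile uint64_t *tstamp,
		uint64_t (*now)(void), int processor_is_idle)
{
	uint64_t	val1 = sketch_timer_grab(idle);
	uint64_t	ts1 = *tstamp;

	if (!processor_is_idle)
		return val1;			/* timer was closed out at idle exit */

	uint64_t	val2 = sketch_timer_grab(idle);
	uint64_t	ts2 = *tstamp;

	if (val1 != val2 || ts1 != ts2)
		return val2;			/* concurrent update observed */

	return val1 + (now() - ts1);		/* stable and stale: extrapolate */
}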