/*-------------------------------------------------------------------------
 Run our virtual Neo Geo according to the run mode.
---------------------------------------------------------------------------*/
Uint32 neogeo_run ( void )
{
    Uint32 TimeSlice;
    Uint32 Elapsed;
    Uint32 Z80_Elapsed;
    Uint32 ReturnCode;

    ReturnCode = 0;

    switch ( neogeo_run_mode )
    {
    case NEOGEO_RUN_QUIT:
        ReturnCode = 1;
        break;

    case NEOGEO_RUN_TRACE:
        /* Single-step: execute one 68000 instruction, keep the Z80 and
           the timers in sync, then drop back to the caller. */
        Elapsed = m68k_execute ( 0 );

        if ( !neogeo_z80_disable )
        {
            neogeo_z80_time_to_execute += M68K_TO_REF ( Elapsed );

            if ( neogeo_z80_time_to_execute > 0 )
            {
                Z80_Elapsed = z80_execute ( REF_TO_Z80 ( neogeo_z80_time_to_execute ) );

                neogeo_z80_time_to_execute -= Z80_TO_REF ( Z80_Elapsed );
                neogeo_z80_time_this_vbl += Z80_TO_REF ( Z80_Elapsed );
            }
        }

        neogeo_screen_position += M68K_TO_REF ( Elapsed );

        timer_advance ( M68K_TO_REF ( Elapsed ) );
        timer_call_events ();

        ReturnCode = 3;
        break;

    case NEOGEO_RUN_NORMAL:
    case NEOGEO_RUN_FULL_THROTTLE:
        while ( 1 )
        {
            /* Run the 68000 up to the next scheduled timer event. */
            TimeSlice = REF_TO_M68K ( timer_request_timeslice () );

            if ( neogeo_show_debugger )
            {
                neogeo_show_debugger = 0;
                ReturnCode = 3;
                break;
            }

            Elapsed = m68k_execute ( TimeSlice );

            if ( !neogeo_z80_disable )
            {
                /* Let the Z80 catch up with the time the 68000 just consumed. */
                neogeo_z80_time_to_execute += M68K_TO_REF ( Elapsed );

                if ( neogeo_z80_time_to_execute > 0 )
                {
                    Z80_Elapsed = z80_execute ( REF_TO_Z80 ( neogeo_z80_time_to_execute ) );

                    neogeo_z80_time_to_execute -= Z80_TO_REF ( Z80_Elapsed );
                    neogeo_z80_time_this_vbl += Z80_TO_REF ( Z80_Elapsed );
                }
            }

            neogeo_screen_position += M68K_TO_REF ( Elapsed );

            timer_advance ( M68K_TO_REF ( Elapsed ) );

            if ( neogeo_show_debugger )
            {
                neogeo_show_debugger = 0;
                ReturnCode = 3;
            }

            /* The run mode may have been changed from inside the emulated
               slice (debugger, UI, quit request); react before looping. */
            if ( neogeo_run_mode == NEOGEO_RUN_TRACE )
            {
                timer_call_events ();
                ReturnCode = 2;
            }

            if ( neogeo_run_mode == NEOGEO_RUN_QUIT )
                ReturnCode = 1;

            if ( ReturnCode )
                break;
        }
        break;
    }

    return ReturnCode;
}
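/*-------------------------------------------------------------------------
 Illustration only (not from the original source): a minimal sketch of how
 a front end might drive neogeo_run(), assuming the return codes keep the
 meanings visible above: 1 = quit requested, 2 = run mode switched to
 trace during a normal slice, 3 = single-step / debugger break.
 host_poll_input() and debugger_enter() are hypothetical placeholders;
 only neogeo_run(), neogeo_run_mode and the NEOGEO_RUN_* constants come
 from the code above.
---------------------------------------------------------------------------*/
#if 0   /* sketch, kept out of the build */
void host_main_loop ( void )
{
    Uint32 Code;

    neogeo_run_mode = NEOGEO_RUN_NORMAL;

    while ( 1 )
    {
        host_poll_input ();             /* hypothetical: pump host events  */

        Code = neogeo_run ();

        if ( Code == 1 )                /* NEOGEO_RUN_QUIT was requested   */
            break;

        if ( Code == 2 || Code == 3 )   /* trace step or debugger break    */
            debugger_enter ();          /* hypothetical front-end hook     */
    }
}
#endif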
kern_return_t
host_statistics(
    host_t                  host,
    host_flavor_t           flavor,
    host_info_t             info,
    mach_msg_type_number_t  *count)
{
    uint32_t    i;

    if (host == HOST_NULL)
        return (KERN_INVALID_HOST);

    switch (flavor) {

    case HOST_LOAD_INFO:
    {
        host_load_info_t    load_info;

        if (*count < HOST_LOAD_INFO_COUNT)
            return (KERN_FAILURE);

        load_info = (host_load_info_t) info;

        bcopy((char *) avenrun,
              (char *) load_info->avenrun, sizeof avenrun);
        bcopy((char *) mach_factor,
              (char *) load_info->mach_factor, sizeof mach_factor);

        *count = HOST_LOAD_INFO_COUNT;
        return (KERN_SUCCESS);
    }

    case HOST_VM_INFO:
    {
        register processor_t        processor;
        register vm_statistics64_t  stat;
        vm_statistics64_data_t      host_vm_stat;
        vm_statistics_t             stat32;
        mach_msg_type_number_t      original_count;

        if (*count < HOST_VM_INFO_REV0_COUNT)
            return (KERN_FAILURE);

        /* Start from the boot processor's counters, then fold in the
           per-processor counters of any remaining processors. */
        processor = processor_list;
        stat = &PROCESSOR_DATA(processor, vm_stat);
        host_vm_stat = *stat;

        if (processor_count > 1) {
            simple_lock(&processor_list_lock);

            while ((processor = processor->processor_list) != NULL) {
                stat = &PROCESSOR_DATA(processor, vm_stat);

                host_vm_stat.zero_fill_count += stat->zero_fill_count;
                host_vm_stat.reactivations += stat->reactivations;
                host_vm_stat.pageins += stat->pageins;
                host_vm_stat.pageouts += stat->pageouts;
                host_vm_stat.faults += stat->faults;
                host_vm_stat.cow_faults += stat->cow_faults;
                host_vm_stat.lookups += stat->lookups;
                host_vm_stat.hits += stat->hits;
            }

            simple_unlock(&processor_list_lock);
        }

        stat32 = (vm_statistics_t) info;

        stat32->free_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_free_count + vm_page_speculative_count);
        stat32->active_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_active_count);

        if (vm_page_local_q) {
            for (i = 0; i < vm_page_local_q_count; i++) {
                struct vpl  *lq;

                lq = &vm_page_local_q[i].vpl_un.vpl;

                stat32->active_count += VM_STATISTICS_TRUNCATE_TO_32_BIT(lq->vpl_count);
            }
        }
        stat32->inactive_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_inactive_count);
#if CONFIG_EMBEDDED
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count);
#else
        stat32->wire_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_wire_count + vm_page_throttled_count + vm_lopage_free_count);
#endif
        stat32->zero_fill_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.zero_fill_count);
        stat32->reactivations = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.reactivations);
        stat32->pageins = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageins);
        stat32->pageouts = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.pageouts);
        stat32->faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.faults);
        stat32->cow_faults = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.cow_faults);
        stat32->lookups = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.lookups);
        stat32->hits = VM_STATISTICS_TRUNCATE_TO_32_BIT(host_vm_stat.hits);

        /*
         * Fill in extra info added in later revisions of the
         * vm_statistics data structure.  Fill in only what can fit
         * in the data structure the caller gave us!
         */
        original_count = *count;
        *count = HOST_VM_INFO_REV0_COUNT; /* rev0 already filled in */
        if (original_count >= HOST_VM_INFO_REV1_COUNT) {
            /* rev1 added "purgeable" info */
            stat32->purgeable_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purgeable_count);
            stat32->purges = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_purged_count);
            *count = HOST_VM_INFO_REV1_COUNT;
        }
        if (original_count >= HOST_VM_INFO_REV2_COUNT) {
            /* rev2 added "speculative" info */
            stat32->speculative_count = VM_STATISTICS_TRUNCATE_TO_32_BIT(vm_page_speculative_count);
            *count = HOST_VM_INFO_REV2_COUNT;
        }

        /* rev3 changed some of the fields to be 64-bit */

        return (KERN_SUCCESS);
    }

    case HOST_CPU_LOAD_INFO:
    {
        register processor_t    processor;
        host_cpu_load_info_t    cpu_load_info;

        if (*count < HOST_CPU_LOAD_INFO_COUNT)
            return (KERN_FAILURE);

#define GET_TICKS_VALUE(processor, state, timer)                        \
MACRO_BEGIN                                                             \
    cpu_load_info->cpu_ticks[(state)] +=                                \
        (uint32_t)(timer_grab(&PROCESSOR_DATA(processor, timer))        \
                / hz_tick_interval);                                    \
MACRO_END

        cpu_load_info = (host_cpu_load_info_t)info;
        cpu_load_info->cpu_ticks[CPU_STATE_USER] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_SYSTEM] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_IDLE] = 0;
        cpu_load_info->cpu_ticks[CPU_STATE_NICE] = 0;

        simple_lock(&processor_list_lock);

        for (processor = processor_list;
             processor != NULL;
             processor = processor->processor_list) {
            timer_data_t    idle_temp;
            timer_t         idle_state;

            GET_TICKS_VALUE(processor, CPU_STATE_USER, user_state);
            if (precise_user_kernel_time) {
                GET_TICKS_VALUE(processor, CPU_STATE_SYSTEM, system_state);
            } else {
                /* system_state may represent either sys or user */
                GET_TICKS_VALUE(processor, CPU_STATE_USER, system_state);
            }

            idle_state = &PROCESSOR_DATA(processor, idle_state);
            idle_temp = *idle_state;

            if (PROCESSOR_DATA(processor, current_state) != idle_state ||
                timer_grab(&idle_temp) != timer_grab(idle_state))
                GET_TICKS_VALUE(processor, CPU_STATE_IDLE, idle_state);
            else {
                /* The idle timer is still running on this processor;
                   credit the time elapsed since its last timestamp. */
                timer_advance(&idle_temp, mach_absolute_time() - idle_temp.tstamp);

                cpu_load_info->cpu_ticks[CPU_STATE_IDLE] +=
                    (uint32_t)(timer_grab(&idle_temp) / hz_tick_interval);
            }
        }
        simple_unlock(&processor_list_lock);

        *count = HOST_CPU_LOAD_INFO_COUNT;

        return (KERN_SUCCESS);
    }

    case HOST_EXPIRED_TASK_INFO:
    {
        if (*count < TASK_POWER_INFO_COUNT) {
            return (KERN_FAILURE);
        }

        task_power_info_t tinfo = (task_power_info_t)info;

        tinfo->task_interrupt_wakeups = dead_task_statistics.task_interrupt_wakeups;
        tinfo->task_platform_idle_wakeups = dead_task_statistics.task_platform_idle_wakeups;

        tinfo->task_timer_wakeups_bin_1 = dead_task_statistics.task_timer_wakeups_bin_1;
        tinfo->task_timer_wakeups_bin_2 = dead_task_statistics.task_timer_wakeups_bin_2;

        tinfo->total_user = dead_task_statistics.total_user_time;
        tinfo->total_system = dead_task_statistics.total_system_time;

        return (KERN_SUCCESS);
    }

    default:
        return (KERN_INVALID_ARGUMENT);
    }
}
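/*
 * Illustration only (not part of this file): a user-space caller of the
 * HOST_VM_INFO flavor handled above, showing the standard Mach pattern --
 * the caller passes the largest count it has room for, and on return
 * count reports which structure revision the kernel actually filled in
 * (the rev0/rev1/rev2 handling above). Kept under #if 0 since user-space
 * headers are not available in a kernel build.
 */
#if 0
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_error.h>

int
main(void)
{
    vm_statistics_data_t    vmstat;
    mach_msg_type_number_t  count = HOST_VM_INFO_COUNT;
    kern_return_t           kr;

    /* mach_host_self() returns a send right to the host port; the
       routine above services the resulting call in the kernel. */
    kr = host_statistics(mach_host_self(), HOST_VM_INFO,
        (host_info_t)&vmstat, &count);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "host_statistics: %s\n", mach_error_string(kr));
        return 1;
    }

    /* count now says which revision of the structure was filled in. */
    printf("free %u active %u inactive %u wired %u\n",
        vmstat.free_count, vmstat.active_count,
        vmstat.inactive_count, vmstat.wire_count);
    return 0;
}
#endif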