void device_scheduler::timeslice()
{
	bool call_debugger = ((machine().debug_flags & DEBUG_FLAG_ENABLED) != 0);

	// build the execution list if we don't have one yet
	if (UNEXPECTED(m_execute_list == NULL))
		rebuild_execute_list();

	// if the current quantum has expired, find a new one
	while (m_basetime >= m_quantum_list.first()->m_expire)
		m_quantum_allocator.reclaim(m_quantum_list.detach_head());

	// loop until we hit the next timer
	while (m_basetime < m_timer_list->m_expire)
	{
		// by default, assume our target is the end of the next quantum
		attotime target = m_basetime + attotime(0, m_quantum_list.first()->m_actual);

		// however, if the next timer is going to fire before then, override
		if (m_timer_list->m_expire < target)
			target = m_timer_list->m_expire;

		// do we have pending suspension changes?
		if (m_suspend_changes_pending)
			apply_suspend_changes();

		// loop over all CPUs
		for (device_execute_interface *exec = m_execute_list; exec != NULL; exec = exec->m_nextexec)
		{
			// only process if this CPU is executing or truly halted (not yielding)
			// and if our target is later than the CPU's current time (coarse check)
			if (EXPECTED((exec->m_suspend == 0 || exec->m_eatcycles) && target.seconds >= exec->m_localtime.seconds))
			{
				// compute how many attoseconds to execute this CPU
				attoseconds_t delta = target.attoseconds - exec->m_localtime.attoseconds;
				if (delta < 0 && target.seconds > exec->m_localtime.seconds)
					delta += ATTOSECONDS_PER_SECOND;
				assert(delta == (target - exec->m_localtime).as_attoseconds());

				// if we have enough for at least 1 cycle, do the math
				if (delta >= exec->m_attoseconds_per_cycle)
				{
					// compute how many cycles we want to execute
					int ran = exec->m_cycles_running = divu_64x32((UINT64)delta >> exec->m_divshift, exec->m_divisor);

					// if we're not suspended, actually execute
					if (exec->m_suspend == 0)
					{
						// note that m_cycles_stolen may be modified during the call to run()
						exec->m_cycles_stolen = 0;
						m_executing_device = exec;
						*exec->m_icountptr = exec->m_cycles_running;
						if (!call_debugger)
							exec->run();
						else
						{
							debugger_start_cpu_hook(&exec->device(), target);
							exec->run();
							debugger_stop_cpu_hook(&exec->device());
						}

						// adjust for any cycles we took back
						assert(ran >= *exec->m_icountptr);
						ran -= *exec->m_icountptr;
						assert(ran >= exec->m_cycles_stolen);
						ran -= exec->m_cycles_stolen;
					}

					// account for these cycles
					exec->m_totalcycles += ran;

					// update the local time for this CPU (deltatime, so we don't shadow delta above)
					attotime deltatime(0, exec->m_attoseconds_per_cycle * ran);
					assert(deltatime >= attotime::zero);
					exec->m_localtime += deltatime;

					// if the new local CPU time is less than our target, move the target up, but not before the base
					if (exec->m_localtime < target)
						target = max(exec->m_localtime, m_basetime);
				}
			}
		}
		m_executing_device = NULL;

		// update the base time so the outer loop makes progress
		m_basetime = target;
	}

	// execute timers that are now due
	execute_timers();
}
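The conversion at the heart of this loop, from an attosecond delta to a whole number of CPU cycles and back, is easier to see without the emulator plumbing. Here is a minimal, self-contained sketch of the round-robin timeslicing under stated assumptions: SimpleCPU, the toy clock periods, and the quantum value are illustrative, not MAME types, and plain division stands in for divu_64x32 with its precomputed divisor and shift. An earlier revision of the same function, which applies suspension changes inline and logs its progress, follows the sketch.

// Standalone sketch of the cooperative timeslice loop above: each CPU is
// run just far enough to reach a shared target time, and the target is
// pulled back whenever a CPU stops short of it. All names here are
// illustrative; only the algorithm mirrors the scheduler.
#include <cstdint>
#include <cstdio>
#include <algorithm>

struct SimpleCPU
{
	const char *tag;
	uint64_t attoseconds_per_cycle;	// period of one clock cycle, in toy units
	uint64_t localtime;				// how far this CPU has executed
	uint64_t totalcycles;
};

// run every CPU up to 'target'; returns the (possibly reduced) target
static uint64_t timeslice(SimpleCPU *cpus, int count, uint64_t basetime, uint64_t target)
{
	for (int i = 0; i < count; i++)
	{
		SimpleCPU &cpu = cpus[i];
		uint64_t delta = target - cpu.localtime;

		// only execute if there is at least one whole cycle to run
		if (delta >= cpu.attoseconds_per_cycle)
		{
			uint64_t cycles = delta / cpu.attoseconds_per_cycle;	// MAME: divu_64x32 + divshift
			cpu.totalcycles += cycles;								// a real core would execute them
			cpu.localtime += cycles * cpu.attoseconds_per_cycle;

			// if the CPU stopped short of the target, later CPUs only need
			// to catch up to it, but never earlier than the base time
			if (cpu.localtime < target)
				target = std::max(cpu.localtime, basetime);
		}
	}
	return target;
}

int main()
{
	SimpleCPU cpus[2] = {
		{ "maincpu", 250, 0, 0 },	// fast CPU: short cycle period
		{ "audiocpu", 1000, 0, 0 },	// slow CPU: long cycle period
	};
	uint64_t basetime = 0;
	while (basetime < 100000)
		basetime = timeslice(cpus, 2, basetime, basetime + 10000);	// quantum of 10000
	for (SimpleCPU &cpu : cpus)
		printf("%s: %llu cycles, localtime %llu\n", cpu.tag,
		       (unsigned long long)cpu.totalcycles, (unsigned long long)cpu.localtime);
	return 0;
}

The quantum in the sketch is deliberately larger than every cycle period; as with the scheduler's minimum quantum, that is what guarantees each pass moves the base time forward.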
void device_scheduler::timeslice()
{
	bool call_debugger = ((machine().debug_flags & DEBUG_FLAG_ENABLED) != 0);

	// build the execution list if we don't have one yet
	if (m_execute_list == NULL)
		rebuild_execute_list();

	// loop until we hit the next timer
	while (m_basetime < m_timer_list->m_expire)
	{
		// by default, assume our target is the end of the next quantum
		attotime target = m_basetime + attotime(0, m_quantum_list.first()->m_actual);

		// however, if the next timer is going to fire before then, override
		if (m_timer_list->m_expire < target)
			target = m_timer_list->m_expire;

		LOG(("------------------\n"));
		LOG(("cpu_timeslice: target = %s\n", target.as_string()));

		// apply pending suspension changes
		UINT32 suspendchanged = 0;
		for (device_execute_interface *exec = m_execute_list; exec != NULL; exec = exec->m_nextexec)
		{
			suspendchanged |= exec->m_suspend ^ exec->m_nextsuspend;
			exec->m_suspend = exec->m_nextsuspend;
			exec->m_nextsuspend &= ~SUSPEND_REASON_TIMESLICE;
			exec->m_eatcycles = exec->m_nexteatcycles;
		}

		// recompute the execute list if any CPUs changed their suspension state
		if (suspendchanged != 0)
			rebuild_execute_list();

		// loop over non-suspended CPUs
		for (device_execute_interface *exec = m_execute_list; exec != NULL; exec = exec->m_nextexec)
		{
			// only process if our target is later than the CPU's current time (coarse check)
			if (target.seconds >= exec->m_localtime.seconds)
			{
				// compute how many attoseconds to execute this CPU
				attoseconds_t delta = target.attoseconds - exec->m_localtime.attoseconds;
				if (delta < 0 && target.seconds > exec->m_localtime.seconds)
					delta += ATTOSECONDS_PER_SECOND;
				assert(delta == (target - exec->m_localtime).as_attoseconds());

				// if we have enough for at least 1 cycle, do the math
				if (delta >= exec->m_attoseconds_per_cycle)
				{
					// compute how many cycles we want to execute
					int ran = exec->m_cycles_running = divu_64x32((UINT64)delta >> exec->m_divshift, exec->m_divisor);
					LOG((" cpu '%s': %d cycles\n", exec->device().tag(), exec->m_cycles_running));

					// if we're not suspended, actually execute
					if (exec->m_suspend == 0)
					{
						g_profiler.start(exec->m_profiler);

						// note that m_cycles_stolen may be modified during the call to run()
						exec->m_cycles_stolen = 0;
						m_executing_device = exec;
						*exec->m_icountptr = exec->m_cycles_running;
						if (!call_debugger)
							exec->run();
						else
						{
							debugger_start_cpu_hook(&exec->device(), target);
							exec->run();
							debugger_stop_cpu_hook(&exec->device());
						}

						// adjust for any cycles we took back
						assert(ran >= *exec->m_icountptr);
						ran -= *exec->m_icountptr;
						assert(ran >= exec->m_cycles_stolen);
						ran -= exec->m_cycles_stolen;
						g_profiler.stop();
					}

					// account for these cycles
					exec->m_totalcycles += ran;

					// update the local time for this CPU (deltatime, so we don't shadow delta above)
					attotime deltatime(0, exec->m_attoseconds_per_cycle * ran);
					assert(deltatime >= attotime::zero);
					exec->m_localtime += deltatime;
					LOG((" %d ran, %d total, time = %s\n", ran, (INT32)exec->m_totalcycles, exec->m_localtime.as_string()));

					// if the new local CPU time is less than our target, move the target up, but not before the base
					if (exec->m_localtime < target)
					{
						target = max(exec->m_localtime, m_basetime);
						LOG((" (new target)\n"));
					}
				}
			}
		}

		// update the base time so the outer loop makes progress
		m_basetime = target;
	}

	// execute timers that are now due
	execute_timers();
}
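The suspension bookkeeping in this older revision is double-buffered: external callers only ever modify m_nextsuspend, and the scheduler commits the pending state at a safe point, XOR-ing old against new to learn whether the execute list must be rebuilt, and stripping SUSPEND_REASON_TIMESLICE so a timeslice suspension lasts exactly one slice. The following sketch isolates just that pattern; the reason bits and the ExecState stand-in for device_execute_interface are hypothetical.

// Sketch of the double-buffered suspend flags: writers touch only the
// 'next' copy, the scheduler commits at a safe point and detects changes
// with an XOR so it rebuilds its execute list only when needed.
#include <cstdint>
#include <cstdio>

const uint32_t SUSPEND_REASON_HALT      = 0x0001;	// hypothetical reason bit
const uint32_t SUSPEND_REASON_TIMESLICE = 0x0002;	// auto-clears every slice

struct ExecState
{
	uint32_t suspend = 0;		// state the scheduler acts on
	uint32_t nextsuspend = 0;	// state requested since the last commit

	void suspend_for(uint32_t reason)	{ nextsuspend |= reason; }
	void resume_from(uint32_t reason)	{ nextsuspend &= ~reason; }
};

// returns true if any CPU's effective suspend state changed
static bool apply_suspend_changes(ExecState *execs, int count)
{
	uint32_t suspendchanged = 0;
	for (int i = 0; i < count; i++)
	{
		suspendchanged |= execs[i].suspend ^ execs[i].nextsuspend;
		execs[i].suspend = execs[i].nextsuspend;
		// a timeslice suspension lasts exactly one slice, so drop it
		// from the pending state once it has been committed
		execs[i].nextsuspend &= ~SUSPEND_REASON_TIMESLICE;
	}
	return suspendchanged != 0;
}

int main()
{
	ExecState cpus[2];
	cpus[1].suspend_for(SUSPEND_REASON_TIMESLICE);

	// slice 1: CPU 1 becomes suspended, so the list must be rebuilt
	printf("rebuild: %d\n", apply_suspend_changes(cpus, 2));	// prints 1
	// slice 2: the timeslice reason auto-cleared, so it changed again
	printf("rebuild: %d\n", apply_suspend_changes(cpus, 2));	// prints 1
	// slice 3: steady state, no rebuild needed
	printf("rebuild: %d\n", apply_suspend_changes(cpus, 2));	// prints 0
	return 0;
}

Because the XOR is accumulated into a single word across all CPUs, one test at the end decides whether the (potentially expensive) list rebuild runs at all.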