namespace DVDThread
{
struct ReadRequest
{
  bool copy_to_ram;
  u32 output_address;
  u64 dvd_offset;
  u32 length;
  DiscIO::Partition partition;

  // This determines which code DVDInterface will run to reply
  // to the emulated software. We can't use callbacks,
  // because function pointers can't be stored in savestates.
  DVDInterface::ReplyType reply_type;

  // IDs are used to uniquely identify a request. They must not be
  // identical to IDs of any other requests that currently exist, but
  // it's fine to re-use IDs of requests that have existed in the past.
  u64 id;

  // Only used for logging
  u64 time_started_ticks;
  u64 realtime_started_us;
  u64 realtime_done_us;
};

using ReadResult = std::pair<ReadRequest, std::vector<u8>>;

static void StartDVDThread();
static void StopDVDThread();
static void DVDThread();
static void WaitUntilIdle();

static void StartReadInternal(bool copy_to_ram, u32 output_address, u64 dvd_offset, u32 length,
                              const DiscIO::Partition& partition,
                              DVDInterface::ReplyType reply_type, s64 ticks_until_completion);

static void FinishRead(u64 id, s64 cycles_late);
static CoreTiming::EventType* s_finish_read;

static u64 s_next_id = 0;

static std::thread s_dvd_thread;
static Common::Event s_request_queue_expanded;    // Is set by CPU thread
static Common::Event s_result_queue_expanded;     // Is set by DVD thread
static Common::Flag s_dvd_thread_exiting(false);  // Is set by CPU thread

static Common::SPSCQueue<ReadRequest, false> s_request_queue;
static Common::SPSCQueue<ReadResult, false> s_result_queue;
static std::map<u64, ReadResult> s_result_map;

static std::unique_ptr<DiscIO::Volume> s_disc;

void Start()
{
  s_finish_read = CoreTiming::RegisterEvent("FinishReadDVDThread", FinishRead);

  s_request_queue_expanded.Reset();
  s_result_queue_expanded.Reset();
  s_request_queue.Clear();
  s_result_queue.Clear();

  // This is reset on every launch for determinism, but it doesn't matter
  // much, because this will never get exposed to the emulated game.
  s_next_id = 0;

  StartDVDThread();
}

static void StartDVDThread()
{
  ASSERT(!s_dvd_thread.joinable());
  s_dvd_thread_exiting.Clear();
  s_dvd_thread = std::thread(DVDThread);
}

void Stop()
{
  StopDVDThread();
  s_disc.reset();
}

static void StopDVDThread()
{
  ASSERT(s_dvd_thread.joinable());

  // By setting s_dvd_thread_exiting, we ask the DVD thread to cleanly exit.
  // In case the request queue is empty, we need to set s_request_queue_expanded
  // so that the DVD thread will wake up and check s_dvd_thread_exiting.
  s_dvd_thread_exiting.Set();
  s_request_queue_expanded.Set();

  s_dvd_thread.join();
}

void DoState(PointerWrap& p)
{
  // By waiting for the DVD thread to be done working, we ensure
  // that s_request_queue will be empty and that the DVD thread
  // won't be touching anything while this function runs.
  WaitUntilIdle();

  // Move all results from s_result_queue to s_result_map because
  // PointerWrap::Do supports std::map but not Common::SPSCQueue.
  // This won't affect the behavior of FinishRead.
  ReadResult result;
  while (s_result_queue.Pop(result))
    s_result_map.emplace(result.first.id, std::move(result));

  // Both queues are now empty, so we don't need to savestate them.
  p.Do(s_result_map);
  p.Do(s_next_id);

  // s_disc isn't savestated (because it points to files on the
  // local system). Instead, we check that the status of the disc
  // is the same as when the savestate was made. This won't catch
  // cases of having the wrong disc inserted, though.
  // TODO: Check the game ID, disc number, revision?
  bool had_disc = HasDisc();
  p.Do(had_disc);
  if (had_disc != HasDisc())
  {
    if (had_disc)
      PanicAlertT("An inserted disc was expected but not found.");
    else
      s_disc.reset();
  }

  // TODO: Savestates can be smaller if the buffers of results aren't saved,
  // but instead get re-read from the disc when loading the savestate.

  // TODO: It would be possible to create a savestate faster by stopping
  // the DVD thread regardless of whether there are pending requests.

  // After loading a savestate, the debug log in FinishRead will report
  // screwed up times for requests that were submitted before the savestate
  // was made. Handling that properly may be more effort than it's worth.
}

void SetDisc(std::unique_ptr<DiscIO::Volume> disc)
{
  WaitUntilIdle();
  s_disc = std::move(disc);
}

bool HasDisc()
{
  return s_disc != nullptr;
}

bool IsEncryptedAndHashed()
{
  // IsEncryptedAndHashed is thread-safe, so calling WaitUntilIdle isn't necessary.
  return s_disc->IsEncryptedAndHashed();
}

DiscIO::Platform GetDiscType()
{
  // GetVolumeType is thread-safe, so calling WaitUntilIdle isn't necessary.
  return s_disc->GetVolumeType();
}

u64 PartitionOffsetToRawOffset(u64 offset, const DiscIO::Partition& partition)
{
  // PartitionOffsetToRawOffset is thread-safe, so calling WaitUntilIdle isn't necessary.
  return s_disc->PartitionOffsetToRawOffset(offset, partition);
}

IOS::ES::TMDReader GetTMD(const DiscIO::Partition& partition)
{
  WaitUntilIdle();
  return s_disc->GetTMD(partition);
}

IOS::ES::TicketReader GetTicket(const DiscIO::Partition& partition)
{
  WaitUntilIdle();
  return s_disc->GetTicket(partition);
}

bool IsInsertedDiscRunning()
{
  if (!s_disc)
    return false;

  WaitUntilIdle();

  return SConfig::GetInstance().GetGameID() == s_disc->GetGameID();
}

bool UpdateRunningGameMetadata(const DiscIO::Partition& partition, std::optional<u64> title_id)
{
  if (!s_disc)
    return false;

  WaitUntilIdle();

  if (title_id)
  {
    const std::optional<u64> volume_title_id = s_disc->GetTitleID(partition);
    if (!volume_title_id || *volume_title_id != *title_id)
      return false;
  }

  SConfig::GetInstance().SetRunningGameMetadata(*s_disc, partition);
  return true;
}

void WaitUntilIdle()
{
  ASSERT(Core::IsCPUThread());

  while (!s_request_queue.Empty())
    s_result_queue_expanded.Wait();

  StopDVDThread();
  StartDVDThread();
}

void StartRead(u64 dvd_offset, u32 length, const DiscIO::Partition& partition,
               DVDInterface::ReplyType reply_type, s64 ticks_until_completion)
{
  StartReadInternal(false, 0, dvd_offset, length, partition, reply_type, ticks_until_completion);
}

void StartReadToEmulatedRAM(u32 output_address, u64 dvd_offset, u32 length,
                            const DiscIO::Partition& partition,
                            DVDInterface::ReplyType reply_type, s64 ticks_until_completion)
{
  StartReadInternal(true, output_address, dvd_offset, length, partition, reply_type,
                    ticks_until_completion);
}

static void StartReadInternal(bool copy_to_ram, u32 output_address, u64 dvd_offset, u32 length,
                              const DiscIO::Partition& partition,
                              DVDInterface::ReplyType reply_type, s64 ticks_until_completion)
{
  ASSERT(Core::IsCPUThread());

  ReadRequest request;

  request.copy_to_ram = copy_to_ram;
  request.output_address = output_address;
  request.dvd_offset = dvd_offset;
  request.length = length;
  request.partition = partition;
  request.reply_type = reply_type;

  u64 id = s_next_id++;
  request.id = id;

  request.time_started_ticks = CoreTiming::GetTicks();
  request.realtime_started_us = Common::Timer::GetTimeUs();

  s_request_queue.Push(std::move(request));
  s_request_queue_expanded.Set();

  CoreTiming::ScheduleEvent(ticks_until_completion, s_finish_read, id);
}

static void FinishRead(u64 id, s64 cycles_late)
{
  // We can't simply pop s_result_queue and always get the ReadResult
  // we want, because the DVD thread may add ReadResults to the queue
  // in a different order than we want to get them. What we do instead
  // is to pop the queue until we find the ReadResult we want (the one
  // whose ID matches userdata), which means we may end up popping
  // ReadResults that we don't want. We can't add those unwanted results
  // back to the queue, because the queue can only have one writer.
  // Instead, we add them to a map that only is used by the CPU thread.
  // When this function is called again later, it will check the map for
  // the wanted ReadResult before it starts searching through the queue.
  ReadResult result;

  auto it = s_result_map.find(id);
  if (it != s_result_map.end())
  {
    result = std::move(it->second);
    s_result_map.erase(it);
  }
  else
  {
    while (true)
    {
      while (!s_result_queue.Pop(result))
        s_result_queue_expanded.Wait();

      if (result.first.id == id)
        break;
      else
        s_result_map.emplace(result.first.id, std::move(result));
    }
  }
  // We have now obtained the right ReadResult.

  const ReadRequest& request = result.first;
  const std::vector<u8>& buffer = result.second;

  DEBUG_LOG(DVDINTERFACE,
            "Disc has been read. Real time: %" PRIu64 " us. "
            "Real time including delay: %" PRIu64 " us. "
            "Emulated time including delay: %" PRIu64 " us.",
            request.realtime_done_us - request.realtime_started_us,
            Common::Timer::GetTimeUs() - request.realtime_started_us,
            (CoreTiming::GetTicks() - request.time_started_ticks) /
                (SystemTimers::GetTicksPerSecond() / 1000000));

  if (buffer.size() != request.length)
  {
    PanicAlertT("The disc could not be read (at 0x%" PRIx64 " - 0x%" PRIx64 ").",
                request.dvd_offset, request.dvd_offset + request.length);
  }
  else
  {
    if (request.copy_to_ram)
      Memory::CopyToEmu(request.output_address, buffer.data(), request.length);
  }

  // Notify the emulated software that the command has been executed
  DVDInterface::FinishExecutingCommand(request.reply_type, DVDInterface::INT_TCINT, cycles_late,
                                       buffer);
}

static void DVDThread()
{
  Common::SetCurrentThreadName("DVD thread");

  while (true)
  {
    s_request_queue_expanded.Wait();

    if (s_dvd_thread_exiting.IsSet())
      return;

    ReadRequest request;
    while (s_request_queue.Pop(request))
    {
      FileMonitor::Log(*s_disc, request.partition, request.dvd_offset);

      std::vector<u8> buffer(request.length);
      if (!s_disc->Read(request.dvd_offset, request.length, buffer.data(), request.partition))
        buffer.resize(0);

      request.realtime_done_us = Common::Timer::GetTimeUs();

      s_result_queue.Push(ReadResult(std::move(request), std::move(buffer)));
      s_result_queue_expanded.Set();

      if (s_dvd_thread_exiting.IsSet())
        return;
    }
  }
}
}  // namespace DVDThread
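// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Dolphin's source): the retrieval pattern
// used by FinishRead above, shown in isolation. The consumer may need the
// result for a specific request ID while the queue delivers other results
// first; because the queue has a single writer, results popped "too early"
// can't be pushed back and are parked in a consumer-only map instead. The
// names below (Result, PendingResults, WaitForResult) are invented for this
// example, and a mutex/condition_variable-protected std::deque stands in for
// Dolphin's lock-free Common::SPSCQueue and Common::Event.
// ---------------------------------------------------------------------------
#include <condition_variable>
#include <cstdint>
#include <deque>
#include <map>
#include <mutex>
#include <utility>
#include <vector>

struct Result
{
  uint64_t id;
  std::vector<uint8_t> data;
};

class PendingResults
{
public:
  // Producer side (the "DVD thread" in Dolphin's case).
  void Push(Result result)
  {
    {
      std::lock_guard<std::mutex> lock(m_mutex);
      m_queue.push_back(std::move(result));
    }
    m_queue_expanded.notify_one();
  }

  // Consumer side (the "CPU thread"). Checks the stash first, then pops the
  // queue until the wanted ID appears; results popped on the way are stashed
  // in a map that only the consumer ever touches.
  Result WaitForResult(uint64_t wanted_id)
  {
    const auto it = m_stashed.find(wanted_id);
    if (it != m_stashed.end())
    {
      Result result = std::move(it->second);
      m_stashed.erase(it);
      return result;
    }

    std::unique_lock<std::mutex> lock(m_mutex);
    while (true)
    {
      m_queue_expanded.wait(lock, [this] { return !m_queue.empty(); });
      Result result = std::move(m_queue.front());
      m_queue.pop_front();
      if (result.id == wanted_id)
        return result;
      m_stashed.emplace(result.id, std::move(result));
    }
  }

private:
  std::mutex m_mutex;
  std::condition_variable m_queue_expanded;
  std::deque<Result> m_queue;            // stand-in for Common::SPSCQueue<ReadResult>
  std::map<uint64_t, Result> m_stashed;  // stand-in for s_result_map (consumer-only)
};
// Usage: the worker thread calls Push() as reads complete; the consumer calls
// WaitForResult(id) in whatever order the scheduled completion events fire,
// mirroring FinishRead's map-then-queue lookup.
// ---------------------------------------------------------------------------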
namespace CoreTiming
{
struct EventType
{
  TimedCallback callback;
  const std::string* name;
};

struct Event
{
  s64 time;
  u64 fifo_order;
  u64 userdata;
  EventType* type;
};

// Sort by time, unless the times are the same, in which case sort by the order added to the queue
static bool operator>(const Event& left, const Event& right)
{
  return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
}
static bool operator<(const Event& left, const Event& right)
{
  return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
}

// unordered_map stores each element separately as a linked list node so pointers to elements
// remain stable regardless of rehashes/resizing.
static std::unordered_map<std::string, EventType> s_event_types;

// STATE_TO_SAVE
// The queue is a min-heap using std::make_heap/push_heap/pop_heap.
// We don't use std::priority_queue because we need to be able to serialize, unserialize and
// erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accommodated
// by the standard adaptor class.
static std::vector<Event> s_event_queue;
static u64 s_event_fifo_id;
static std::mutex s_ts_write_lock;
static Common::SPSCQueue<Event, false> s_ts_queue;

static float s_last_OC_factor;
static constexpr int MAX_SLICE_LENGTH = 20000;

static s64 s_idled_cycles;
static u32 s_fake_dec_start_value;
static u64 s_fake_dec_start_ticks;

// Are we in a function that has been called from Advance()
static bool s_is_global_timer_sane;

Globals g;

static EventType* s_ev_lost = nullptr;

static void EmptyTimedCallback(u64 userdata, s64 cyclesLate)
{
}

// Changing the CPU speed in Dolphin isn't actually done by changing the physical clock rate,
// but by changing the amount of work done in a particular amount of time. This tends to be more
// compatible because it stops the games from actually knowing directly that the clock rate has
// changed, and ensures that anything based on waiting a specific number of cycles still works.
//
// Technically it might be more accurate to call this changing the IPC instead of the CPU speed,
// but the effect is largely the same.
static int DowncountToCycles(int downcount)
{
  return static_cast<int>(downcount * g.last_OC_factor_inverted);
}

static int CyclesToDowncount(int cycles)
{
  return static_cast<int>(cycles * s_last_OC_factor);
}

EventType* RegisterEvent(const std::string& name, TimedCallback callback)
{
  // check for existing type with same name.
  // we want event type names to remain unique so that we can use them for serialization.
  ASSERT_MSG(POWERPC, s_event_types.find(name) == s_event_types.end(),
             "CoreTiming Event \"%s\" is already registered. Events should only be registered "
             "during Init to avoid breaking save states.",
             name.c_str());

  auto info = s_event_types.emplace(name, EventType{callback, nullptr});
  EventType* event_type = &info.first->second;
  event_type->name = &info.first->first;
  return event_type;
}

void UnregisterAllEvents()
{
  ASSERT_MSG(POWERPC, s_event_queue.empty(), "Cannot unregister events with events pending");
  s_event_types.clear();
}

void Init()
{
  s_last_OC_factor = SConfig::GetInstance().m_OCEnable ? SConfig::GetInstance().m_OCFactor : 1.0f;
  g.last_OC_factor_inverted = 1.0f / s_last_OC_factor;
  PowerPC::ppcState.downcount = CyclesToDowncount(MAX_SLICE_LENGTH);
  g.slice_length = MAX_SLICE_LENGTH;
  g.global_timer = 0;
  s_idled_cycles = 0;

  // The time between CoreTiming being initialized and the first call to Advance() is considered
  // the slice boundary between slice -1 and slice 0. Dispatcher loops must call Advance() before
  // executing the first PPC cycle of each slice to prepare the slice length and downcount for
  // that slice.
  s_is_global_timer_sane = true;

  s_event_fifo_id = 0;
  s_ev_lost = RegisterEvent("_lost_event", &EmptyTimedCallback);
}

void Shutdown()
{
  std::lock_guard<std::mutex> lk(s_ts_write_lock);
  MoveEvents();
  ClearPendingEvents();
  UnregisterAllEvents();
}

void DoState(PointerWrap& p)
{
  std::lock_guard<std::mutex> lk(s_ts_write_lock);
  p.Do(g.slice_length);
  p.Do(g.global_timer);
  p.Do(s_idled_cycles);
  p.Do(s_fake_dec_start_value);
  p.Do(s_fake_dec_start_ticks);
  p.Do(g.fake_TB_start_value);
  p.Do(g.fake_TB_start_ticks);
  p.Do(s_last_OC_factor);
  g.last_OC_factor_inverted = 1.0f / s_last_OC_factor;
  p.Do(s_event_fifo_id);

  p.DoMarker("CoreTimingData");

  MoveEvents();
  p.DoEachElement(s_event_queue, [](PointerWrap& pw, Event& ev) {
    pw.Do(ev.time);
    pw.Do(ev.fifo_order);

    // this is why we can't have (nice things) pointers as userdata
    pw.Do(ev.userdata);

    // we can't savestate ev.type directly because events might not get registered in the same
    // order (or at all) every time.
    // so, we savestate the event's type's name, and derive ev.type from that when loading.
    std::string name;
    if (pw.GetMode() != PointerWrap::MODE_READ)
      name = *ev.type->name;

    pw.Do(name);
    if (pw.GetMode() == PointerWrap::MODE_READ)
    {
      auto itr = s_event_types.find(name);
      if (itr != s_event_types.end())
      {
        ev.type = &itr->second;
      }
      else
      {
        WARN_LOG(POWERPC,
                 "Lost event from savestate because its type, \"%s\", has not been registered.",
                 name.c_str());
        ev.type = s_ev_lost;
      }
    }
  });
  p.DoMarker("CoreTimingEvents");

  // When loading from a save state, we must assume the Event order is random and meaningless.
  // The exact layout of the heap in memory is implementation defined, therefore it is platform
  // and library version specific.
  if (p.GetMode() == PointerWrap::MODE_READ)
    std::make_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
}

// This should only be called from the CPU thread. If you are calling
// it from any other thread, you are doing something evil
u64 GetTicks()
{
  u64 ticks = static_cast<u64>(g.global_timer);
  if (!s_is_global_timer_sane)
  {
    int downcount = DowncountToCycles(PowerPC::ppcState.downcount);
    ticks += g.slice_length - downcount;
  }
  return ticks;
}

u64 GetIdleTicks()
{
  return static_cast<u64>(s_idled_cycles);
}

void ClearPendingEvents()
{
  s_event_queue.clear();
}

void ScheduleEvent(s64 cycles_into_future, EventType* event_type, u64 userdata, FromThread from)
{
  ASSERT_MSG(POWERPC, event_type, "Event type is nullptr, will crash now.");
  bool from_cpu_thread;
  if (from == FromThread::ANY)
  {
    from_cpu_thread = Core::IsCPUThread();
  }
  else
  {
    from_cpu_thread = from == FromThread::CPU;
    ASSERT_MSG(POWERPC, from_cpu_thread == Core::IsCPUThread(),
               "A \"%s\" event was scheduled from the wrong thread (%s)",
               event_type->name->c_str(), from_cpu_thread ? "CPU" : "non-CPU");
  }

  if (from_cpu_thread)
  {
    s64 timeout = GetTicks() + cycles_into_future;

    // If this event needs to be scheduled before the next advance(), force one early
    if (!s_is_global_timer_sane)
      ForceExceptionCheck(cycles_into_future);

    s_event_queue.emplace_back(Event{timeout, s_event_fifo_id++, userdata, event_type});
    std::push_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
  }
  else
  {
    if (Core::WantsDeterminism())
    {
      ERROR_LOG(POWERPC,
                "Someone scheduled an off-thread \"%s\" event while netplay or "
                "movie play/record was active. This is likely to cause a desync.",
                event_type->name->c_str());
    }

    std::lock_guard<std::mutex> lk(s_ts_write_lock);
    s_ts_queue.Push(Event{g.global_timer + cycles_into_future, 0, userdata, event_type});
  }
}

void RemoveEvent(EventType* event_type)
{
  auto itr = std::remove_if(s_event_queue.begin(), s_event_queue.end(),
                            [&](const Event& e) { return e.type == event_type; });

  // Removing random items breaks the invariant so we have to re-establish it.
  if (itr != s_event_queue.end())
  {
    s_event_queue.erase(itr, s_event_queue.end());
    std::make_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
  }
}

void RemoveAllEvents(EventType* event_type)
{
  MoveEvents();
  RemoveEvent(event_type);
}

void ForceExceptionCheck(s64 cycles)
{
  cycles = std::max<s64>(0, cycles);
  if (DowncountToCycles(PowerPC::ppcState.downcount) > cycles)
  {
    // downcount is always (much) smaller than MAX_INT so we can safely cast cycles to an int here.
    // Account for cycles already executed by adjusting the g.slice_length
    g.slice_length -= DowncountToCycles(PowerPC::ppcState.downcount) - static_cast<int>(cycles);
    PowerPC::ppcState.downcount = CyclesToDowncount(static_cast<int>(cycles));
  }
}

void MoveEvents()
{
  for (Event ev; s_ts_queue.Pop(ev);)
  {
    ev.fifo_order = s_event_fifo_id++;
    s_event_queue.emplace_back(std::move(ev));
    std::push_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
  }
}

void Advance()
{
  MoveEvents();

  int cyclesExecuted = g.slice_length - DowncountToCycles(PowerPC::ppcState.downcount);
  g.global_timer += cyclesExecuted;
  s_last_OC_factor = SConfig::GetInstance().m_OCEnable ? SConfig::GetInstance().m_OCFactor : 1.0f;
  g.last_OC_factor_inverted = 1.0f / s_last_OC_factor;
  g.slice_length = MAX_SLICE_LENGTH;

  s_is_global_timer_sane = true;

  while (!s_event_queue.empty() && s_event_queue.front().time <= g.global_timer)
  {
    Event evt = std::move(s_event_queue.front());
    std::pop_heap(s_event_queue.begin(), s_event_queue.end(), std::greater<Event>());
    s_event_queue.pop_back();
    // NOTICE_LOG(POWERPC, "[Scheduler] %-20s (%lld, %lld)", evt.type->name->c_str(),
    //            g.global_timer, evt.time);
    evt.type->callback(evt.userdata, g.global_timer - evt.time);
  }

  s_is_global_timer_sane = false;

  // Still events left (scheduled in the future)
  if (!s_event_queue.empty())
  {
    g.slice_length = static_cast<int>(
        std::min<s64>(s_event_queue.front().time - g.global_timer, MAX_SLICE_LENGTH));
  }

  PowerPC::ppcState.downcount = CyclesToDowncount(g.slice_length);

  // Check for any external exceptions.
  // It's important to do this after processing events otherwise any exceptions will be delayed
  // until the next slice:
  //   Pokemon Box refuses to boot if the first exception from the audio DMA is received late
  PowerPC::CheckExternalExceptions();
}

void LogPendingEvents()
{
  auto clone = s_event_queue;
  std::sort(clone.begin(), clone.end());
  for (const Event& ev : clone)
  {
    INFO_LOG(POWERPC, "PENDING: Now: %" PRId64 " Pending: %" PRId64 " Type: %s", g.global_timer,
             ev.time, ev.type->name->c_str());
  }
}

// Should only be called from the CPU thread after the PPC clock has changed
void AdjustEventQueueTimes(u32 new_ppc_clock, u32 old_ppc_clock)
{
  for (Event& ev : s_event_queue)
  {
    const s64 ticks = (ev.time - g.global_timer) * new_ppc_clock / old_ppc_clock;
    ev.time = g.global_timer + ticks;
  }
}

void Idle()
{
  if (SConfig::GetInstance().bSyncGPUOnSkipIdleHack)
  {
    // When the FIFO is processing data we must not advance, because that would
    // desynchronize the VI. So we wait until the FIFO has finished, and in the
    // meantime we process only the events required by the FIFO.
    Fifo::FlushGpu();
  }

  s_idled_cycles += DowncountToCycles(PowerPC::ppcState.downcount);
  PowerPC::ppcState.downcount = 0;
}

std::string GetScheduledEventsSummary()
{
  std::string text = "Scheduled events\n";
  text.reserve(1000);

  auto clone = s_event_queue;
  std::sort(clone.begin(), clone.end());
  for (const Event& ev : clone)
  {
    text += StringFromFormat("%s : %" PRIi64 " %016" PRIx64 "\n", ev.type->name->c_str(), ev.time,
                             ev.userdata);
  }
  return text;
}

u32 GetFakeDecStartValue()
{
  return s_fake_dec_start_value;
}

void SetFakeDecStartValue(u32 val)
{
  s_fake_dec_start_value = val;
}

u64 GetFakeDecStartTicks()
{
  return s_fake_dec_start_ticks;
}

void SetFakeDecStartTicks(u64 val)
{
  s_fake_dec_start_ticks = val;
}

u64 GetFakeTBStartValue()
{
  return g.fake_TB_start_value;
}

void SetFakeTBStartValue(u64 val)
{
  g.fake_TB_start_value = val;
}

u64 GetFakeTBStartTicks()
{
  return g.fake_TB_start_ticks;
}

void SetFakeTBStartTicks(u64 val)
{
  g.fake_TB_start_ticks = val;
}
}  // namespace CoreTiming
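// ---------------------------------------------------------------------------
// Illustrative sketch (not part of Dolphin's source): the event-queue data
// structure used by CoreTiming above, reduced to a self-contained example.
// The queue is a plain std::vector kept as a min-heap with std::push_heap /
// std::pop_heap and std::greater, ordered by (time, fifo_order) so that events
// due at the same time fire in the order they were scheduled, and rebuilt with
// std::make_heap after arbitrary removals (which is why std::priority_queue
// isn't used). The names below (SimpleEvent, SimpleScheduler) are invented for
// this example.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdint>
#include <functional>
#include <tuple>
#include <vector>

struct SimpleEvent
{
  int64_t time;
  uint64_t fifo_order;
  std::function<void(int64_t)> callback;  // called with how many cycles late it ran
};

// Sort by time, then by insertion order, matching CoreTiming's operator>.
static bool operator>(const SimpleEvent& left, const SimpleEvent& right)
{
  return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
}

class SimpleScheduler
{
public:
  void Schedule(int64_t time, std::function<void(int64_t)> callback)
  {
    m_queue.push_back(SimpleEvent{time, m_fifo_id++, std::move(callback)});
    std::push_heap(m_queue.begin(), m_queue.end(), std::greater<SimpleEvent>());
  }

  // Run every event due at or before 'now', earliest (and FIFO) first.
  void Advance(int64_t now)
  {
    while (!m_queue.empty() && m_queue.front().time <= now)
    {
      SimpleEvent evt = std::move(m_queue.front());
      std::pop_heap(m_queue.begin(), m_queue.end(), std::greater<SimpleEvent>());
      m_queue.pop_back();
      evt.callback(now - evt.time);
    }
  }

  // Removing arbitrary elements breaks the heap invariant, so re-establish it,
  // mirroring what RemoveEvent() does above.
  template <typename Predicate>
  void RemoveIf(Predicate pred)
  {
    const auto it = std::remove_if(m_queue.begin(), m_queue.end(), pred);
    if (it != m_queue.end())
    {
      m_queue.erase(it, m_queue.end());
      std::make_heap(m_queue.begin(), m_queue.end(), std::greater<SimpleEvent>());
    }
  }

private:
  std::vector<SimpleEvent> m_queue;
  uint64_t m_fifo_id = 0;
};
// The fifo_order counter is what lets a vector-based heap stay deterministic:
// without it, events with equal times could fire in an implementation-defined
// order, which is exactly what CoreTiming avoids for savestate and netplay
// determinism.
// ---------------------------------------------------------------------------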