Example #1
u32 VideoBackendHardware::Video_AccessEFB(EFBAccessType type, u32 x, u32 y, u32 InputData)
{
	if (s_BackendInitialized && g_ActiveConfig.bEFBAccessEnable)
	{
		s_accessEFBArgs.type = type;
		s_accessEFBArgs.x = x;
		s_accessEFBArgs.y = y;
		s_accessEFBArgs.Data = InputData;

		s_efbAccessRequested.Set();

		if (SConfig::GetInstance().m_LocalCoreStartupParameter.bCPUThread)
		{
			s_efbAccessReadyEvent.Reset();
			if (s_FifoShuttingDown.IsSet())
				return 0;
			s_efbAccessRequested.Set();
			s_efbAccessReadyEvent.Wait();
		}
		else
			VideoFifo_CheckEFBAccess();

		return s_AccessEFBResult;
	}

	return 0;
}
Example #2
static void DVDThread()
{
  Common::SetCurrentThreadName("DVD thread");

  while (true)
  {
    s_request_queue_expanded.Wait();

    if (s_dvd_thread_exiting.IsSet())
      return;

    ReadRequest request;
    while (s_request_queue.Pop(request))
    {
      FileMonitor::Log(*s_disc, request.partition, request.dvd_offset);

      std::vector<u8> buffer(request.length);
      if (!s_disc->Read(request.dvd_offset, request.length, buffer.data(), request.partition))
        buffer.resize(0);

      request.realtime_done_us = Common::Timer::GetTimeUs();

      s_result_queue.Push(ReadResult(std::move(request), std::move(buffer)));
      s_result_queue_expanded.Set();

      if (s_dvd_thread_exiting.IsSet())
        return;
    }
  }
}
Example #3
static void VideoFifo_CheckPerfQueryRequest()
{
	if (s_perfQueryRequested.IsSet())
	{
		g_perf_query->FlushResults();
		s_perfQueryRequested.Clear();
		s_perfQueryReadyEvent.Set();
	}
}
Example #4
void VideoFifo_CheckEFBAccess()
{
	if (s_efbAccessRequested.IsSet())
	{
		s_AccessEFBResult = g_renderer->AccessEFB(s_accessEFBArgs.type, s_accessEFBArgs.x, s_accessEFBArgs.y, s_accessEFBArgs.Data);
		s_efbAccessRequested.Clear();
		s_efbAccessReadyEvent.Set();
	}
}
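
Examples #1 and #4 are the two halves of a single handshake: the CPU thread publishes its arguments, raises s_efbAccessRequested and blocks on s_efbAccessReadyEvent, while the GPU loop polls the flag, performs the access, clears it and signals the event (Example #7 later sets s_FifoShuttingDown and signals the ready events so a blocked requester cannot deadlock during shutdown). Below is a minimal standalone sketch of that request/ready pattern. It is not Dolphin code: std::atomic and std::condition_variable stand in for Common::Flag and Common::Event, and every name in it is illustrative.

// Standalone sketch: cross-thread request/ready handshake (assumed names, not Dolphin's API).
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

namespace
{
std::atomic<bool> s_work_requested{false};  // plays the role of s_efbAccessRequested
std::atomic<bool> s_shutting_down{false};   // plays the role of s_FifoShuttingDown
std::mutex s_ready_mutex;
std::condition_variable s_ready_cv;         // plays the role of s_efbAccessReadyEvent
bool s_ready = false;
int s_result = 0;                           // plays the role of s_AccessEFBResult
}

// "GPU thread" side: poll the request flag, do the work, clear it, signal completion.
static void ServiceRequests()
{
  while (!s_shutting_down.load())
  {
    if (s_work_requested.load())
    {
      s_result = 42;  // stand-in for g_renderer->AccessEFB(...)
      s_work_requested.store(false);
      {
        std::lock_guard<std::mutex> lock(s_ready_mutex);
        s_ready = true;
      }
      s_ready_cv.notify_one();
    }
    std::this_thread::yield();
  }
}

// "CPU thread" side: reset the ready state, publish the request, block until serviced.
static int RequestWork()
{
  {
    std::lock_guard<std::mutex> lock(s_ready_mutex);
    s_ready = false;  // corresponds to s_efbAccessReadyEvent.Reset()
  }
  if (s_shutting_down.load())
    return 0;
  s_work_requested.store(true);  // corresponds to s_efbAccessRequested.Set()
  std::unique_lock<std::mutex> lock(s_ready_mutex);
  s_ready_cv.wait(lock, [] { return s_ready; });  // corresponds to s_efbAccessReadyEvent.Wait()
  return s_result;
}

int main()
{
  std::thread worker(ServiceRequests);
  std::printf("result = %d\n", RequestWork());
  s_shutting_down.store(true);
  worker.join();
}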
Example #5
// Run from the graphics thread (from Fifo.cpp)
static void VideoFifo_CheckSwapRequest()
{
	if (g_ActiveConfig.bUseXFB)
	{
		if (s_swapRequested.IsSet())
		{
			EFBRectangle rc;
			Renderer::Swap(s_beginFieldArgs.xfbAddr, s_beginFieldArgs.fbWidth, s_beginFieldArgs.fbHeight, rc);
			s_swapRequested.Clear();
		}
	}
}
Example #6
void VideoBackendHardware::InitializeShared()
{
	VideoCommon_Init();

	s_swapRequested.Clear();
	s_efbAccessRequested.Clear();
	s_perfQueryRequested.Clear();
	s_FifoShuttingDown.Clear();
	memset((void*)&s_beginFieldArgs, 0, sizeof(s_beginFieldArgs));
	memset(&s_accessEFBArgs, 0, sizeof(s_accessEFBArgs));
	s_AccessEFBResult = 0;
	m_invalid = false;
}
Example #7
void VideoBackendHardware::Video_ExitLoop()
{
	ExitGpuLoop();
	s_FifoShuttingDown.Set();
	s_efbAccessReadyEvent.Set();
	s_perfQueryReadyEvent.Set();
}
Example #8
// Run from the CPU thread (from VideoInterface.cpp)
void VideoBackendHardware::Video_EndField()
{
	if (s_BackendInitialized)
	{
		s_swapRequested.Set();
	}
}
Example #9
void Host_Message(int Id)
{
  if (Id == WM_USER_STOP)
  {
    s_running.Clear();
    updateMainFrameEvent.Set();
  }
}
Example #10
 virtual void MainLoop()
 {
   while (s_running.IsSet())
   {
     Core::HostDispatchJobs();
     std::this_thread::sleep_for(std::chrono::milliseconds(100));
   }
 }
Example #11
void VideoBackendHardware::InitializeShared()
{
	VideoCommon_Init();

	s_FifoShuttingDown.Clear();
	memset((void*)&s_beginFieldArgs, 0, sizeof(s_beginFieldArgs));
	m_invalid = false;
}
Example #12
static void signal_handler(int)
{
  const char message[] = "A signal was received. A second signal will force Dolphin to stop.\n";
  if (write(STDERR_FILENO, message, sizeof(message)) < 0)
  {
  }
  s_shutdown_requested.Set();
}
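
The handler above limits itself to operations that are legal inside a signal handler: a write() to stderr (the empty if only silences the unused-result warning) and setting a flag that the main loop inspects later (Example #29 consumes a similar flag with TestAndClear). Below is a minimal standalone sketch of the same idiom with a plain sig_atomic_t flag; it assumes a POSIX system and all names are illustrative.

// Standalone sketch: async-signal-safe shutdown request (assumed names, POSIX only).
#include <signal.h>
#include <unistd.h>

namespace
{
volatile sig_atomic_t s_stop_requested = 0;

void HandleSignal(int)
{
  // Only async-signal-safe work here: write() and a flag store.
  const char msg[] = "shutdown requested\n";
  if (write(STDERR_FILENO, msg, sizeof(msg) - 1) < 0)
  {
  }
  s_stop_requested = 1;
}
}  // namespace

int main()
{
  struct sigaction sa{};
  sa.sa_handler = HandleSignal;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGINT, &sa, nullptr);
  sigaction(SIGTERM, &sa, nullptr);

  while (!s_stop_requested)
  {
    // ... main loop work goes here ...
    usleep(100000);
  }
  return 0;
}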
Example #13
File: Fifo.cpp  Project: stenzek/dolphin
void EmulatorState(bool running)
{
  s_emu_running_state.Set(running);
  if (running)
    s_gpu_mainloop.Wakeup();
  else
    s_gpu_mainloop.AllowSleep();
}
Example #14
File: Fifo.cpp  Project: stenzek/dolphin
// May be executed from any thread, even the graphics thread.
// Created to allow for self shutdown.
void ExitGpuLoop()
{
  // This should break the wait loop in CPU thread
  CommandProcessor::fifo.bFF_GPReadEnable = false;
  FlushGpu();

  // Terminate GPU thread loop
  s_emu_running_state.Set();
  s_gpu_mainloop.Stop(false);
}
Example #15
void HiresTexture::Shutdown()
{
  if (s_prefetcher.joinable())
  {
    s_textureCacheAbortLoading.Set();
    s_prefetcher.join();
  }

  s_textureMap.clear();
  s_textureCache.clear();
}
Example #16
static void StopDVDThread()
{
  ASSERT(s_dvd_thread.joinable());

  // By setting s_dvd_thread_exiting, we ask the DVD thread to cleanly exit.
  // In case the request queue is empty, we need to set s_request_queue_expanded
  // so that the DVD thread will wake up and check s_dvd_thread_exiting.
  s_dvd_thread_exiting.Set();
  s_request_queue_expanded.Set();

  s_dvd_thread.join();
}
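
Examples #2, #16 and #24 spell out the worker shutdown protocol: StopDVDThread raises the exit flag first, then signals the event the thread sleeps on (so it wakes even when the request queue is empty), and only then joins; StartDVDThread clears the flag before spawning the thread again. Below is a minimal standalone sketch of that wake-then-join idiom, standard library only; the Worker class and its names are illustrative.

// Standalone sketch: stopping a worker that sleeps on an event (assumed names).
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>

class Worker
{
public:
  void Start()
  {
    m_exiting.store(false);
    m_thread = std::thread(&Worker::Run, this);
  }

  void Stop()
  {
    // Order matters: raise the flag first, then wake the thread so it can see it.
    m_exiting.store(true);
    Wake();
    m_thread.join();
  }

  void Wake()
  {
    {
      std::lock_guard<std::mutex> lock(m_mutex);
      m_work_available = true;
    }
    m_cv.notify_one();
  }

private:
  void Run()
  {
    while (true)
    {
      std::unique_lock<std::mutex> lock(m_mutex);
      m_cv.wait(lock, [this] { return m_work_available; });
      m_work_available = false;
      lock.unlock();

      if (m_exiting.load())
        return;

      // ... drain the request queue here ...
    }
  }

  std::thread m_thread;
  std::atomic<bool> m_exiting{false};
  std::mutex m_mutex;
  std::condition_variable m_cv;
  bool m_work_available = false;
};

int main()
{
  Worker w;
  w.Start();
  w.Stop();
}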
Example #17
u32 VideoBackendHardware::Video_GetQueryResult(PerfQueryType type)
{
	if (!g_perf_query->ShouldEmulate())
	{
		return 0;
	}

	// TODO: Is this check sane?
	if (!g_perf_query->IsFlushed())
	{
		if (SConfig::GetInstance().m_LocalCoreStartupParameter.bCPUThread)
		{
			s_perfQueryReadyEvent.Reset();
			if (s_FifoShuttingDown.IsSet())
				return 0;
			s_perfQueryRequested.Set();
			s_perfQueryReadyEvent.Wait();
		}
		else
			g_perf_query->FlushResults();
	}

	return g_perf_query->GetQueryResult(type);
}
Example #18
// Run from the graphics thread (from Fifo.cpp)
void VideoFifo_CheckSwapRequestAt(u32 xfbAddr, u32 fbWidth, u32 fbHeight)
{
	if (g_ActiveConfig.bUseXFB)
	{
		if (s_swapRequested.IsSet())
		{
			u32 aLower = xfbAddr;
			u32 aUpper = xfbAddr + 2 * fbWidth * fbHeight;
			u32 bLower = s_beginFieldArgs.xfbAddr;
			u32 bUpper = s_beginFieldArgs.xfbAddr + 2 * s_beginFieldArgs.fbWidth * s_beginFieldArgs.fbHeight;

			if (addrRangesOverlap(aLower, aUpper, bLower, bUpper))
				VideoFifo_CheckSwapRequest();
		}
	}
}
Example #19
void HiresTexture::Prefetch()
{
	Common::SetCurrentThreadName("Prefetcher");

	u32 starttime = Common::Timer::GetTimeMs();
	for (const auto& entry : s_textureMap)
	{
		const std::string& base_filename = entry.first;

		std::unique_lock<std::mutex> lk(s_textureCacheMutex);

		auto iter = s_textureCache.find(base_filename);
		if (iter == s_textureCache.end())
		{
			lk.unlock();
			HiresTexture* ptr = Load(base_filename, [](size_t requested_size)
			{
				return new u8[requested_size];
			}, true);
			lk.lock();
			if (ptr != nullptr)
			{
				size_sum.fetch_add(ptr->m_cached_data_size);
				iter = s_textureCache.insert(iter, std::make_pair(base_filename, std::shared_ptr<HiresTexture>(ptr)));
			}
		}

		if (s_textureCacheAbortLoading.IsSet())
		{
			return;
		}

		if (size_sum.load() > max_mem)
		{
			g_Config.bCacheHiresTextures = false;

			OSD::AddMessage(StringFromFormat("Custom Textures prefetching after %.1f MB aborted, not enough RAM available", size_sum / (1024.0 * 1024.0)), 10000);
			return;
		}
	}
	u32 stoptime = Common::Timer::GetTimeMs();
	OSD::AddMessage(StringFromFormat("Custom Textures loaded, %.1f MB in %.1f s", size_sum / (1024.0 * 1024.0), (stoptime - starttime) / 1000.0), 10000);
}
Example #20
File: MainBase.cpp  Project: E2xD/dolphin
void VideoBackendBase::InitializeShared()
{
  memset(&g_main_cp_state, 0, sizeof(g_main_cp_state));
  memset(&g_preprocess_cp_state, 0, sizeof(g_preprocess_cp_state));
  memset(texMem, 0, TMEM_SIZE);

  // Do our OSD callbacks
  OSD::DoCallbacks(OSD::CallbackType::Initialization);

  // do not initialize again for the config window
  m_initialized = true;

  s_FifoShuttingDown.Clear();
  memset((void*)&s_beginFieldArgs, 0, sizeof(s_beginFieldArgs));
  m_invalid = false;
  frameCount = 0;

  CommandProcessor::Init();
  Fifo::Init();
  OpcodeDecoder::Init();
  PixelEngine::Init();
  BPInit();
  VertexLoaderManager::Init();
  IndexGenerator::Init();
  VertexShaderManager::Init();
  GeometryShaderManager::Init();
  PixelShaderManager::Init();

  g_Config.Load(File::GetUserPath(D_CONFIG_IDX) + "GFX.ini");
  g_Config.GameIniLoad();
  g_Config.UpdateProjectionHack();
  g_Config.VerifyValidity();
  UpdateActiveConfig();

  // Notify the core that the video backend is ready
  Host_Message(WM_USER_CREATE);
}
Example #21
void HostDispatchJobs()
{
  // WARNING: This should only run on the Host Thread.
  // NOTE: This function is potentially re-entrant. If a job calls
  //   Core::Stop for instance then we'll enter this a second time.
  std::unique_lock<std::mutex> guard(s_host_jobs_lock);
  while (!s_host_jobs_queue.empty())
  {
    HostJob job = std::move(s_host_jobs_queue.front());
    s_host_jobs_queue.pop();

    // NOTE: Memory ordering is important. The booting flag needs to be
    //   checked first because the state transition is:
    //   CORE_UNINITIALIZED: s_is_booting -> s_hardware_initialized
    //   We need to check variables in the same order as the state
    //   transition, otherwise we race and get transient failures.
    if (!job.run_after_stop && !s_is_booting.IsSet() && !IsRunning())
      continue;

    guard.unlock();
    job.job();
    guard.lock();
  }
}
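
The comments in HostDispatchJobs call out two constraints: the function may be re-entered from inside a job, and each job must run with the lock released so it can queue further work without deadlocking. Below is a minimal standalone sketch of that unlock-around-the-job loop, standard library only, with illustrative names.

// Standalone sketch: drain a job queue while releasing the lock around each job (assumed names).
#include <cstdio>
#include <functional>
#include <mutex>
#include <queue>

static std::mutex s_jobs_lock;
static std::queue<std::function<void()>> s_jobs_queue;

void QueueJob(std::function<void()> job)
{
  std::lock_guard<std::mutex> guard(s_jobs_lock);
  s_jobs_queue.push(std::move(job));
}

void DispatchJobs()
{
  std::unique_lock<std::mutex> guard(s_jobs_lock);
  while (!s_jobs_queue.empty())
  {
    std::function<void()> job = std::move(s_jobs_queue.front());
    s_jobs_queue.pop();

    // Run the job without holding the lock: it may call QueueJob() or even
    // DispatchJobs() again, exactly the re-entrancy the comments above warn about.
    guard.unlock();
    job();
    guard.lock();
  }
}

int main()
{
  QueueJob([] { QueueJob([] { std::puts("nested job"); }); });
  DispatchJobs();  // runs the outer job and the job it queued
}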
Example #22
void HiresTexture::Update()
{
  bool BuildMaterialMaps = g_ActiveConfig.bHiresMaterialMapsBuild;
  if (s_prefetcher.joinable())
  {
    s_textureCacheAbortLoading.Set();
    s_prefetcher.join();
  }

  if (!g_ActiveConfig.bHiresTextures)
  {
    s_textureMap.clear();
    s_textureCache.clear();
    size_sum.store(0);
    return;
  }

  if (!g_ActiveConfig.bCacheHiresTextures)
  {
    s_textureCache.clear();
    size_sum.store(0);
  }

  s_textureMap.clear();
  const std::string& game_id = SConfig::GetInstance().GetGameID();
  const std::string texture_directory = GetTextureDirectory(game_id);

  std::string ddscode(".dds");
  std::string cddscode(".DDS");
  std::vector<std::string> Extensions;
  Extensions.push_back(".png");
  if (!BuildMaterialMaps)
  {
    Extensions.push_back(".dds");
  }

  std::vector<std::string> filenames =
      Common::DoFileSearch({texture_directory}, Extensions, /*recursive*/ true);

  const std::string miptag = "mip";

  for (const std::string& fileitem : filenames)
  {
    std::string filename;
    std::string Extension;
    SplitPath(fileitem, nullptr, &filename, &Extension);
    if (filename.substr(0, s_format_prefix.length()) != s_format_prefix)
    {
      // Discard wrong files
      continue;
    }
    size_t map_index = 0;
    size_t max_type = BuildMaterialMaps ? MapType::specular : MapType::normal;
    bool arbitrary_mips = false;
    for (size_t tag = 1; tag <= MapType::specular; tag++)
    {
      if (StringEndsWith(filename, s_maps_tags[tag]))
      {
        map_index = tag;
        filename = filename.substr(0, filename.size() - s_maps_tags[tag].size());
        break;
      }
    }
    if (map_index > max_type)
    {
      continue;
    }
    if (BuildMaterialMaps && map_index == MapType::material)
    {
      continue;
    }
    else if (!BuildMaterialMaps && map_index == MapType::color)
    {
      const size_t arb_index = filename.rfind("_arb");
      arbitrary_mips = arb_index != std::string::npos;
      if (arbitrary_mips)
        filename.erase(arb_index, 4);
    }
    const bool is_compressed = Extension.compare(ddscode) == 0 || Extension.compare(cddscode) == 0;
    hires_mip_level mip_level_detail(fileitem, Extension, is_compressed);
    u32 level = 0;
    size_t idx = filename.find_last_of('_');
    std::string miplevel = filename.substr(idx + 1, std::string::npos);
    if (miplevel.substr(0, miptag.length()) == miptag)
    {
      sscanf(miplevel.substr(3, std::string::npos).c_str(), "%i", &level);
      filename = filename.substr(0, idx);
    }
    HiresTextureCache::iterator iter = s_textureMap.find(filename);
    u32 min_item_size = level + 1;
    if (iter == s_textureMap.end())
    {
      HiresTextureCacheItem item(min_item_size);
      if (arbitrary_mips)
      {
        item.has_arbitrary_mips = true;
      }
      item.maps[map_index].resize(min_item_size);
      std::vector<hires_mip_level>& dst = item.maps[map_index];
      dst[level] = mip_level_detail;
      s_textureMap.emplace(filename, item);
    }
    else
    {
      std::vector<hires_mip_level>& dst = iter->second.maps[map_index];
      if (arbitrary_mips)
      {
        iter->second.has_arbitrary_mips = true;
      }
      if (dst.size() < min_item_size)
      {
        dst.resize(min_item_size);
      }
      dst[level] = mip_level_detail;
    }
  }

  if (g_ActiveConfig.bCacheHiresTextures && s_textureMap.size() > 0)
  {
    // remove cached but deleted textures
    auto iter = s_textureCache.begin();
    while (iter != s_textureCache.end())
    {
      if (s_textureMap.find(iter->first) == s_textureMap.end())
      {
        size_sum.fetch_sub(iter->second->m_cached_data_size);
        iter = s_textureCache.erase(iter);
      }
      else
      {
        iter++;
      }
    }
    s_textureCacheAbortLoading.Clear();
    s_prefetcher = std::thread(Prefetch);
    if (g_ActiveConfig.bWaitForCacheHiresTextures && s_prefetcher.joinable())
    {
      s_prefetcher.join();
    }
  }
}
Example #23
void HiresTexture::Prefetch()
{
  Common::SetCurrentThreadName("Prefetcher");
  const size_t total = s_textureMap.size();
  size_t count = 0;
  size_t notification = 10;
  u32 starttime = Common::Timer::GetTimeMs();
  for (const auto& entry : s_textureMap)
  {
    const std::string& base_filename = entry.first;

    std::unique_lock<std::mutex> lk(s_textureCacheMutex);

    auto iter = s_textureCache.find(base_filename);
    if (iter == s_textureCache.end())
    {
      lk.unlock();
      HiresTexture* ptr =
          Load(base_filename, [](size_t requested_size) { return new u8[requested_size]; }, true);
      lk.lock();
      if (ptr != nullptr)
      {
        size_sum.fetch_add(ptr->m_cached_data_size);
        iter = s_textureCache.insert(
            iter, std::make_pair(base_filename, std::shared_ptr<HiresTexture>(ptr)));
      }
    }

    if (s_textureCacheAbortLoading.IsSet())
    {
      if (g_ActiveConfig.bWaitForCacheHiresTextures)
      {
        Host_UpdateProgressDialog("", -1, -1);
      }
      return;
    }

    if (size_sum.load() > max_mem)
    {
      Config::SetCurrent(Config::GFX_HIRES_TEXTURES, false);

      OSD::AddMessage(
          StringFromFormat(
              "Custom Textures prefetching after %.1f MB aborted, not enough RAM available",
              size_sum / (1024.0 * 1024.0)),
          10000);
      if (g_ActiveConfig.bWaitForCacheHiresTextures)
      {
        Host_UpdateProgressDialog("", -1, -1);
      }
      return;
    }
    count++;
    size_t percent = (count * 100) / total;
    if (percent >= notification)
    {
      if (g_ActiveConfig.bWaitForCacheHiresTextures)
      {
        Host_UpdateProgressDialog(GetStringT("Prefetching Custom Textures...").c_str(),
                                  static_cast<int>(count), static_cast<int>(total));
      }
      else
      {
        OSD::AddMessage(StringFromFormat("Custom Textures prefetching %.1f MB %zu %% finished",
                                         size_sum / (1024.0 * 1024.0), percent),
                        2000);
      }
      notification += 10;
    }
  }
  if (g_ActiveConfig.bWaitForCacheHiresTextures)
  {
    Host_UpdateProgressDialog("", -1, -1);
  }
  u32 stoptime = Common::Timer::GetTimeMs();
  OSD::AddMessage(StringFromFormat("Custom Textures loaded, %.1f MB in %.1f s",
                                   size_sum / (1024.0 * 1024.0), (stoptime - starttime) / 1000.0),
                  10000);
}
Example #24
static void StartDVDThread()
{
  ASSERT(!s_dvd_thread.joinable());
  s_dvd_thread_exiting.Clear();
  s_dvd_thread = std::thread(DVDThread);
}
Example #25
void HiresTexture::Update()
{
	s_check_native_format = false;
	s_check_new_format = false;

	if (s_prefetcher.joinable())
	{
		s_textureCacheAbortLoading.Set();
		s_prefetcher.join();
	}

	if (!g_ActiveConfig.bHiresTextures)
	{
		s_textureMap.clear();
		s_textureCache.clear();
		size_sum.store(0);
		return;
	}

	if (!g_ActiveConfig.bCacheHiresTextures)
	{
		s_textureCache.clear();
		size_sum.store(0);
	}
	
	s_textureMap.clear();
	const std::string& gameCode = SConfig::GetInstance().m_strUniqueID;
	std::string szDir = StringFromFormat("%s%s", File::GetUserPath(D_HIRESTEXTURES_IDX).c_str(), gameCode.c_str());	
	std::string ddscode(".dds");
	std::string cddscode(".DDS");
	std::vector<std::string> Extensions = {
		".png",
		".dds"
	};

	auto rFilenames = DoFileSearch(Extensions, { szDir }, /*recursive*/ true);

	const std::string code = StringFromFormat("%s_", gameCode.c_str());
	const std::string miptag = "mip";
	const std::string normaltag = ".nrm";
	for (u32 i = 0; i < rFilenames.size(); i++)
	{
		std::string FileName;
		std::string Extension;
		SplitPath(rFilenames[i], nullptr, &FileName, &Extension);
		if (FileName.substr(0, code.length()) == code)
		{
			s_check_native_format = true;
		}
		else if (FileName.substr(0, s_format_prefix.length()) == s_format_prefix)
		{
			s_check_new_format = true;
		}
		else
		{
			// Discard wrong files
			continue;
		}
		const bool is_compressed = Extension.compare(ddscode) == 0 || Extension.compare(cddscode) == 0;
		const bool is_normal_map = hasEnding(FileName, normaltag);
		if (is_normal_map)
		{
			FileName = FileName.substr(0, FileName.size() - normaltag.size());
		}
		hires_mip_level mip_level_detail(rFilenames[i], Extension, is_compressed);
		u32 level = 0;
		size_t idx = FileName.find_last_of('_');
		std::string miplevel = FileName.substr(idx + 1, std::string::npos);
		if (miplevel.substr(0, miptag.length()) == miptag)
		{
			sscanf(miplevel.substr(3, std::string::npos).c_str(), "%i", &level);
			FileName = FileName.substr(0, idx);
		}
		HiresTextureCache::iterator iter = s_textureMap.find(FileName);
		u32 min_item_size = level + 1;
		if (iter == s_textureMap.end())
		{
			HiresTextureCacheItem item(min_item_size);
			if (is_normal_map)
			{
				item.normal_map.resize(min_item_size);
			}
			std::vector<hires_mip_level> &dst = is_normal_map ? item.normal_map : item.color_map;
			dst[level] = mip_level_detail;
			s_textureMap.emplace(FileName, item);
		}
		else
		{
			std::vector<hires_mip_level> &dst = is_normal_map ? iter->second.normal_map : iter->second.color_map;
			if (dst.size() < min_item_size)
			{
				dst.resize(min_item_size);
			}
			dst[level] = mip_level_detail;
		}
	}

	if (g_ActiveConfig.bCacheHiresTextures && s_textureMap.size() > 0)
	{
		// remove cached but deleted textures
		auto iter = s_textureCache.begin();
		while (iter != s_textureCache.end())
		{
			if (s_textureMap.find(iter->first) == s_textureMap.end())
			{
				size_sum.fetch_sub(iter->second->m_cached_data_size);
				iter = s_textureCache.erase(iter);
			}
			else
			{
				iter++;
			}
		}
		s_textureCacheAbortLoading.Clear();
		s_prefetcher = std::thread(Prefetch);
	}
}
Example #26
namespace DVDThread
{
struct ReadRequest
{
  bool copy_to_ram;
  u32 output_address;
  u64 dvd_offset;
  u32 length;
  DiscIO::Partition partition;

  // This determines which code DVDInterface will run to reply
  // to the emulated software. We can't use callbacks,
  // because function pointers can't be stored in savestates.
  DVDInterface::ReplyType reply_type;

  // IDs are used to uniquely identify a request. They must not be
  // identical to IDs of any other requests that currently exist, but
  // it's fine to re-use IDs of requests that have existed in the past.
  u64 id;

  // Only used for logging
  u64 time_started_ticks;
  u64 realtime_started_us;
  u64 realtime_done_us;
};

using ReadResult = std::pair<ReadRequest, std::vector<u8>>;

static void StartDVDThread();
static void StopDVDThread();

static void DVDThread();
static void WaitUntilIdle();

static void StartReadInternal(bool copy_to_ram, u32 output_address, u64 dvd_offset, u32 length,
                              const DiscIO::Partition& partition,
                              DVDInterface::ReplyType reply_type, s64 ticks_until_completion);

static void FinishRead(u64 id, s64 cycles_late);
static CoreTiming::EventType* s_finish_read;

static u64 s_next_id = 0;

static std::thread s_dvd_thread;
static Common::Event s_request_queue_expanded;    // Is set by CPU thread
static Common::Event s_result_queue_expanded;     // Is set by DVD thread
static Common::Flag s_dvd_thread_exiting(false);  // Is set by CPU thread

static Common::SPSCQueue<ReadRequest, false> s_request_queue;
static Common::SPSCQueue<ReadResult, false> s_result_queue;
static std::map<u64, ReadResult> s_result_map;

static std::unique_ptr<DiscIO::Volume> s_disc;

void Start()
{
  s_finish_read = CoreTiming::RegisterEvent("FinishReadDVDThread", FinishRead);

  s_request_queue_expanded.Reset();
  s_result_queue_expanded.Reset();
  s_request_queue.Clear();
  s_result_queue.Clear();

  // This is reset on every launch for determinism, but it doesn't matter
  // much, because this will never get exposed to the emulated game.
  s_next_id = 0;

  StartDVDThread();
}

static void StartDVDThread()
{
  ASSERT(!s_dvd_thread.joinable());
  s_dvd_thread_exiting.Clear();
  s_dvd_thread = std::thread(DVDThread);
}

void Stop()
{
  StopDVDThread();
  s_disc.reset();
}

static void StopDVDThread()
{
  ASSERT(s_dvd_thread.joinable());

  // By setting s_dvd_thread_exiting, we ask the DVD thread to cleanly exit.
  // In case the request queue is empty, we need to set s_request_queue_expanded
  // so that the DVD thread will wake up and check s_dvd_thread_exiting.
  s_dvd_thread_exiting.Set();
  s_request_queue_expanded.Set();

  s_dvd_thread.join();
}

void DoState(PointerWrap& p)
{
  // By waiting for the DVD thread to be done working, we ensure
  // that s_request_queue will be empty and that the DVD thread
  // won't be touching anything while this function runs.
  WaitUntilIdle();

  // Move all results from s_result_queue to s_result_map because
  // PointerWrap::Do supports std::map but not Common::SPSCQueue.
  // This won't affect the behavior of FinishRead.
  ReadResult result;
  while (s_result_queue.Pop(result))
    s_result_map.emplace(result.first.id, std::move(result));

  // Both queues are now empty, so we don't need to savestate them.
  p.Do(s_result_map);
  p.Do(s_next_id);

  // s_disc isn't savestated (because it points to files on the
  // local system). Instead, we check that the status of the disc
  // is the same as when the savestate was made. This won't catch
  // cases of having the wrong disc inserted, though.
  // TODO: Check the game ID, disc number, revision?
  bool had_disc = HasDisc();
  p.Do(had_disc);
  if (had_disc != HasDisc())
  {
    if (had_disc)
      PanicAlertT("An inserted disc was expected but not found.");
    else
      s_disc.reset();
  }

  // TODO: Savestates can be smaller if the buffers of results aren't saved,
  // but instead get re-read from the disc when loading the savestate.

  // TODO: It would be possible to create a savestate faster by stopping
  // the DVD thread regardless of whether there are pending requests.

  // After loading a savestate, the debug log in FinishRead will report
  // screwed up times for requests that were submitted before the savestate
  // was made. Handling that properly may be more effort than it's worth.
}

void SetDisc(std::unique_ptr<DiscIO::Volume> disc)
{
  WaitUntilIdle();
  s_disc = std::move(disc);
}

bool HasDisc()
{
  return s_disc != nullptr;
}

bool IsEncryptedAndHashed()
{
  // IsEncryptedAndHashed is thread-safe, so calling WaitUntilIdle isn't necessary.
  return s_disc->IsEncryptedAndHashed();
}

DiscIO::Platform GetDiscType()
{
  // GetVolumeType is thread-safe, so calling WaitUntilIdle isn't necessary.
  return s_disc->GetVolumeType();
}

u64 PartitionOffsetToRawOffset(u64 offset, const DiscIO::Partition& partition)
{
  // PartitionOffsetToRawOffset is thread-safe, so calling WaitUntilIdle isn't necessary.
  return s_disc->PartitionOffsetToRawOffset(offset, partition);
}

IOS::ES::TMDReader GetTMD(const DiscIO::Partition& partition)
{
  WaitUntilIdle();
  return s_disc->GetTMD(partition);
}

IOS::ES::TicketReader GetTicket(const DiscIO::Partition& partition)
{
  WaitUntilIdle();
  return s_disc->GetTicket(partition);
}

bool IsInsertedDiscRunning()
{
  if (!s_disc)
    return false;

  WaitUntilIdle();

  return SConfig::GetInstance().GetGameID() == s_disc->GetGameID();
}

bool UpdateRunningGameMetadata(const DiscIO::Partition& partition, std::optional<u64> title_id)
{
  if (!s_disc)
    return false;

  WaitUntilIdle();

  if (title_id)
  {
    const std::optional<u64> volume_title_id = s_disc->GetTitleID(partition);
    if (!volume_title_id || *volume_title_id != *title_id)
      return false;
  }

  SConfig::GetInstance().SetRunningGameMetadata(*s_disc, partition);
  return true;
}

void WaitUntilIdle()
{
  ASSERT(Core::IsCPUThread());

  while (!s_request_queue.Empty())
    s_result_queue_expanded.Wait();

  StopDVDThread();
  StartDVDThread();
}

void StartRead(u64 dvd_offset, u32 length, const DiscIO::Partition& partition,
               DVDInterface::ReplyType reply_type, s64 ticks_until_completion)
{
  StartReadInternal(false, 0, dvd_offset, length, partition, reply_type, ticks_until_completion);
}

void StartReadToEmulatedRAM(u32 output_address, u64 dvd_offset, u32 length,
                            const DiscIO::Partition& partition, DVDInterface::ReplyType reply_type,
                            s64 ticks_until_completion)
{
  StartReadInternal(true, output_address, dvd_offset, length, partition, reply_type,
                    ticks_until_completion);
}

static void StartReadInternal(bool copy_to_ram, u32 output_address, u64 dvd_offset, u32 length,
                              const DiscIO::Partition& partition,
                              DVDInterface::ReplyType reply_type, s64 ticks_until_completion)
{
  ASSERT(Core::IsCPUThread());

  ReadRequest request;

  request.copy_to_ram = copy_to_ram;
  request.output_address = output_address;
  request.dvd_offset = dvd_offset;
  request.length = length;
  request.partition = partition;
  request.reply_type = reply_type;

  u64 id = s_next_id++;
  request.id = id;

  request.time_started_ticks = CoreTiming::GetTicks();
  request.realtime_started_us = Common::Timer::GetTimeUs();

  s_request_queue.Push(std::move(request));
  s_request_queue_expanded.Set();

  CoreTiming::ScheduleEvent(ticks_until_completion, s_finish_read, id);
}

static void FinishRead(u64 id, s64 cycles_late)
{
  // We can't simply pop s_result_queue and always get the ReadResult
  // we want, because the DVD thread may add ReadResults to the queue
  // in a different order than we want to get them. What we do instead
  // is to pop the queue until we find the ReadResult we want (the one
  // whose ID matches userdata), which means we may end up popping
  // ReadResults that we don't want. We can't add those unwanted results
  // back to the queue, because the queue can only have one writer.
  // Instead, we add them to a map that only is used by the CPU thread.
  // When this function is called again later, it will check the map for
  // the wanted ReadResult before it starts searching through the queue.
  ReadResult result;
  auto it = s_result_map.find(id);
  if (it != s_result_map.end())
  {
    result = std::move(it->second);
    s_result_map.erase(it);
  }
  else
  {
    while (true)
    {
      while (!s_result_queue.Pop(result))
        s_result_queue_expanded.Wait();

      if (result.first.id == id)
        break;
      else
        s_result_map.emplace(result.first.id, std::move(result));
    }
  }
  // We have now obtained the right ReadResult.

  const ReadRequest& request = result.first;
  const std::vector<u8>& buffer = result.second;

  DEBUG_LOG(DVDINTERFACE,
            "Disc has been read. Real time: %" PRIu64 " us. "
            "Real time including delay: %" PRIu64 " us. "
            "Emulated time including delay: %" PRIu64 " us.",
            request.realtime_done_us - request.realtime_started_us,
            Common::Timer::GetTimeUs() - request.realtime_started_us,
            (CoreTiming::GetTicks() - request.time_started_ticks) /
                (SystemTimers::GetTicksPerSecond() / 1000000));

  if (buffer.size() != request.length)
  {
    PanicAlertT("The disc could not be read (at 0x%" PRIx64 " - 0x%" PRIx64 ").",
                request.dvd_offset, request.dvd_offset + request.length);
  }
  else
  {
    if (request.copy_to_ram)
      Memory::CopyToEmu(request.output_address, buffer.data(), request.length);
  }

  // Notify the emulated software that the command has been executed
  DVDInterface::FinishExecutingCommand(request.reply_type, DVDInterface::INT_TCINT, cycles_late,
                                       buffer);
}

static void DVDThread()
{
  Common::SetCurrentThreadName("DVD thread");

  while (true)
  {
    s_request_queue_expanded.Wait();

    if (s_dvd_thread_exiting.IsSet())
      return;

    ReadRequest request;
    while (s_request_queue.Pop(request))
    {
      FileMonitor::Log(*s_disc, request.partition, request.dvd_offset);

      std::vector<u8> buffer(request.length);
      if (!s_disc->Read(request.dvd_offset, request.length, buffer.data(), request.partition))
        buffer.resize(0);

      request.realtime_done_us = Common::Timer::GetTimeUs();

      s_result_queue.Push(ReadResult(std::move(request), std::move(buffer)));
      s_result_queue_expanded.Set();

      if (s_dvd_thread_exiting.IsSet())
        return;
    }
  }
}
}  // namespace DVDThread
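
The long comment at the top of FinishRead explains the retrieval strategy: the DVD thread may push ReadResults in a different order than the CPU thread asks for them, and the SPSC queue allows only one reader and one writer, so mismatched results get parked in a map owned by the CPU thread. Below is a minimal standalone sketch of that pattern, with a plain std::queue standing in for the SPSC queue and illustrative names (the real code additionally waits on s_result_queue_expanded whenever the queue is momentarily empty).

// Standalone sketch: fetch one result by ID from a queue that may deliver out of order.
#include <cstdint>
#include <map>
#include <queue>
#include <string>
#include <utility>

using Result = std::pair<std::uint64_t, std::string>;  // {id, payload}

static std::queue<Result> s_pending_results;              // stands in for the SPSC result queue
static std::map<std::uint64_t, Result> s_parked_results;  // consumer-only stash

static Result TakeResult(std::uint64_t wanted_id)
{
  // Check the stash first: an earlier call may already have popped this result.
  auto it = s_parked_results.find(wanted_id);
  if (it != s_parked_results.end())
  {
    Result r = std::move(it->second);
    s_parked_results.erase(it);
    return r;
  }

  // Otherwise pop until the wanted ID shows up, parking everything else.
  // (The real code blocks on an event here whenever the queue is empty.)
  while (true)
  {
    Result r = std::move(s_pending_results.front());
    s_pending_results.pop();
    if (r.first == wanted_id)
      return r;
    s_parked_results.emplace(r.first, std::move(r));
  }
}

int main()
{
  s_pending_results.push({2, "second"});
  s_pending_results.push({1, "first"});
  Result r = TakeResult(1);  // parks {2, "second"}, returns {1, "first"}
  (void)r;
}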
Example #27
File: Fifo.cpp  Project: stenzek/dolphin
// Description: Main FIFO update loop
// Purpose: Keep the Core HW updated about the CPU-GPU distance
void RunGpuLoop()
{
  AsyncRequests::GetInstance()->SetEnable(true);
  AsyncRequests::GetInstance()->SetPassthrough(false);

  s_gpu_mainloop.Run(
      [] {
        const SConfig& param = SConfig::GetInstance();

        g_video_backend->PeekMessages();

        // Do nothing while paused
        if (!s_emu_running_state.IsSet())
          return;

        if (s_use_deterministic_gpu_thread)
        {
          AsyncRequests::GetInstance()->PullEvents();

          // All the fifo/CP stuff is on the CPU.  We just need to run the opcode decoder.
          u8* seen_ptr = s_video_buffer_seen_ptr;
          u8* write_ptr = s_video_buffer_write_ptr;
          // See comment in SyncGPU
          if (write_ptr > seen_ptr)
          {
            s_video_buffer_read_ptr =
                OpcodeDecoder::Run(DataReader(s_video_buffer_read_ptr, write_ptr), nullptr, false);
            s_video_buffer_seen_ptr = write_ptr;
          }
        }
        else
        {
          SCPFifoStruct& fifo = CommandProcessor::fifo;

          AsyncRequests::GetInstance()->PullEvents();

          CommandProcessor::SetCPStatusFromGPU();

          // check if we are able to run this buffer
          while (!CommandProcessor::IsInterruptWaiting() && fifo.bFF_GPReadEnable &&
                 fifo.CPReadWriteDistance && !AtBreakpoint())
          {
            if (param.bSyncGPU && s_sync_ticks.load() < param.iSyncGpuMinDistance)
              break;

            u32 cyclesExecuted = 0;
            u32 readPtr = fifo.CPReadPointer;
            ReadDataFromFifo(readPtr);

            if (readPtr == fifo.CPEnd)
              readPtr = fifo.CPBase;
            else
              readPtr += 32;

            _assert_msg_(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0,
                         "Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce "
                         "instability in the game. Please report it.",
                         fifo.CPReadWriteDistance - 32);

            u8* write_ptr = s_video_buffer_write_ptr;
            s_video_buffer_read_ptr = OpcodeDecoder::Run(
                DataReader(s_video_buffer_read_ptr, write_ptr), &cyclesExecuted, false);

            Common::AtomicStore(fifo.CPReadPointer, readPtr);
            Common::AtomicAdd(fifo.CPReadWriteDistance, -32);
            if ((write_ptr - s_video_buffer_read_ptr) == 0)
              Common::AtomicStore(fifo.SafeCPReadPointer, fifo.CPReadPointer);

            CommandProcessor::SetCPStatusFromGPU();

            if (param.bSyncGPU)
            {
              cyclesExecuted = (int)(cyclesExecuted / param.fSyncGpuOverclock);
              int old = s_sync_ticks.fetch_sub(cyclesExecuted);
              if (old >= param.iSyncGpuMaxDistance &&
                  old - (int)cyclesExecuted < param.iSyncGpuMaxDistance)
                s_sync_wakeup_event.Set();
            }

            // This call is pretty important in DualCore mode and must be called in the FIFO Loop.
            // If we don't, s_swapRequested or s_efbAccessRequested won't be set to false
            // leading the CPU thread to wait in Video_BeginField or Video_AccessEFB thus slowing
            // things down.
            AsyncRequests::GetInstance()->PullEvents();
          }

          // fast skip remaining GPU time if fifo is empty
          if (s_sync_ticks.load() > 0)
          {
            int old = s_sync_ticks.exchange(0);
            if (old >= param.iSyncGpuMaxDistance)
              s_sync_wakeup_event.Set();
          }

          // The fifo is empty and it's unlikely we will get any more work in the near future.
          // Make sure VertexManager finishes drawing any primitives it has stored in its buffer.
          g_vertex_manager->Flush();
        }
      },
      100);

  AsyncRequests::GetInstance()->SetEnable(false);
  AsyncRequests::GetInstance()->SetPassthrough(true);
}
Example #28
int main(int argc, char* argv[])
{
  auto parser = CommandLineParse::CreateParser(CommandLineParse::ParserOptions::OmitGUIOptions);
  optparse::Values& options = CommandLineParse::ParseArguments(parser.get(), argc, argv);
  std::vector<std::string> args = parser->args();

  std::string boot_filename;
  if (options.is_set("exec"))
  {
    boot_filename = static_cast<const char*>(options.get("exec"));
  }
  else if (args.size())
  {
    boot_filename = args.front();
    args.erase(args.begin());
  }
  else
  {
    parser->print_help();
    return 0;
  }

  std::string user_directory;
  if (options.is_set("user"))
  {
    user_directory = static_cast<const char*>(options.get("user"));
  }

  platform = GetPlatform();
  if (!platform)
  {
    fprintf(stderr, "No platform found\n");
    return 1;
  }

  UICommon::SetUserDirectory(user_directory);
  UICommon::Init();

  Core::SetOnStoppedCallback([]() { s_running.Clear(); });
  platform->Init();

  // Shut down cleanly on SIGINT and SIGTERM
  struct sigaction sa;
  sa.sa_handler = signal_handler;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = SA_RESETHAND;
  sigaction(SIGINT, &sa, nullptr);
  sigaction(SIGTERM, &sa, nullptr);

  DolphinAnalytics::Instance()->ReportDolphinStart("nogui");

  if (!BootManager::BootCore(BootParameters::GenerateFromFile(boot_filename)))
  {
    fprintf(stderr, "Could not boot %s\n", boot_filename.c_str());
    return 1;
  }

  while (!Core::IsRunning() && s_running.IsSet())
  {
    Core::HostDispatchJobs();
    updateMainFrameEvent.Wait();
  }

  if (s_running.IsSet())
    platform->MainLoop();
  Core::Stop();

  Core::Shutdown();
  platform->Shutdown();
  UICommon::Shutdown();

  delete platform;

  return 0;
}
Example #29
  void MainLoop() override
  {
    bool fullscreen = SConfig::GetInstance().bFullscreen;
    int last_window_width = SConfig::GetInstance().iRenderWindowWidth;
    int last_window_height = SConfig::GetInstance().iRenderWindowHeight;

    if (fullscreen)
    {
      rendererIsFullscreen = X11Utils::ToggleFullscreen(dpy, win);
#if defined(HAVE_XRANDR) && HAVE_XRANDR
      XRRConfig->ToggleDisplayMode(True);
#endif
    }

    // The actual loop
    while (s_running.IsSet())
    {
      if (s_shutdown_requested.TestAndClear())
      {
        const auto ios = IOS::HLE::GetIOS();
        const auto stm = ios ? ios->GetDeviceByName("/dev/stm/eventhook") : nullptr;
        if (!s_tried_graceful_shutdown.IsSet() && stm &&
            std::static_pointer_cast<IOS::HLE::Device::STMEventHook>(stm)->HasHookInstalled())
        {
          ProcessorInterface::PowerButton_Tap();
          s_tried_graceful_shutdown.Set();
        }
        else
        {
          s_running.Clear();
        }
      }

      XEvent event;
      KeySym key;
      for (int num_events = XPending(dpy); num_events > 0; num_events--)
      {
        XNextEvent(dpy, &event);
        switch (event.type)
        {
        case KeyPress:
          key = XLookupKeysym((XKeyEvent*)&event, 0);
          if (key == XK_Escape)
          {
            if (Core::GetState() == Core::State::Running)
            {
              if (SConfig::GetInstance().bHideCursor)
                XUndefineCursor(dpy, win);
              Core::SetState(Core::State::Paused);
            }
            else
            {
              if (SConfig::GetInstance().bHideCursor)
                XDefineCursor(dpy, win, blankCursor);
              Core::SetState(Core::State::Running);
            }
          }
          else if ((key == XK_Return) && (event.xkey.state & Mod1Mask))
          {
            fullscreen = !fullscreen;
            X11Utils::ToggleFullscreen(dpy, win);
#if defined(HAVE_XRANDR) && HAVE_XRANDR
            XRRConfig->ToggleDisplayMode(fullscreen);
#endif
          }
          else if (key >= XK_F1 && key <= XK_F8)
          {
            int slot_number = key - XK_F1 + 1;
            if (event.xkey.state & ShiftMask)
              State::Save(slot_number);
            else
              State::Load(slot_number);
          }
          else if (key == XK_F9)
            Core::SaveScreenShot();
          else if (key == XK_F11)
            State::LoadLastSaved();
          else if (key == XK_F12)
          {
            if (event.xkey.state & ShiftMask)
              State::UndoLoadState();
            else
              State::UndoSaveState();
          }
          break;
        case FocusIn:
          rendererHasFocus = true;
          if (SConfig::GetInstance().bHideCursor && Core::GetState() != Core::State::Paused)
            XDefineCursor(dpy, win, blankCursor);
          break;
        case FocusOut:
          rendererHasFocus = false;
          if (SConfig::GetInstance().bHideCursor)
            XUndefineCursor(dpy, win);
          break;
        case ClientMessage:
          if ((unsigned long)event.xclient.data.l[0] == XInternAtom(dpy, "WM_DELETE_WINDOW", False))
            s_shutdown_requested.Set();
          break;
        case ConfigureNotify:
        {
          if (last_window_width != event.xconfigure.width ||
              last_window_height != event.xconfigure.height)
          {
            last_window_width = event.xconfigure.width;
            last_window_height = event.xconfigure.height;

            // We call Renderer::ChangeSurface here to indicate the size has changed,
            // but pass the same window handle. This is needed for the Vulkan backend,
            // otherwise it cannot tell that the window has been resized on some drivers.
            if (g_renderer)
              g_renderer->ChangeSurface(s_window_handle);
          }
        }
        break;
        }
      }
      if (!fullscreen)
      {
        Window winDummy;
        unsigned int borderDummy, depthDummy;
        XGetGeometry(dpy, win, &winDummy, &SConfig::GetInstance().iRenderWindowXPos,
                     &SConfig::GetInstance().iRenderWindowYPos,
                     (unsigned int*)&SConfig::GetInstance().iRenderWindowWidth,
                     (unsigned int*)&SConfig::GetInstance().iRenderWindowHeight, &borderDummy,
                     &depthDummy);
        rendererIsFullscreen = false;
      }
      Core::HostDispatchJobs();
      usleep(100000);
    }
  }
Example #30
File: Fifo.cpp  Project: stenzek/dolphin
namespace Fifo
{
static constexpr u32 FIFO_SIZE = 2 * 1024 * 1024;
static constexpr int GPU_TIME_SLOT_SIZE = 1000;

static Common::BlockingLoop s_gpu_mainloop;

static Common::Flag s_emu_running_state;

// Most of this array is unlikely to be faulted in...
static u8 s_fifo_aux_data[FIFO_SIZE];
static u8* s_fifo_aux_write_ptr;
static u8* s_fifo_aux_read_ptr;

// This could be in SConfig, but it depends on multiple settings
// and can change at runtime.
static bool s_use_deterministic_gpu_thread;

static CoreTiming::EventType* s_event_sync_gpu;

// STATE_TO_SAVE
static u8* s_video_buffer;
static u8* s_video_buffer_read_ptr;
static std::atomic<u8*> s_video_buffer_write_ptr;
static std::atomic<u8*> s_video_buffer_seen_ptr;
static u8* s_video_buffer_pp_read_ptr;
// The read_ptr is always owned by the GPU thread.  In normal mode, so is the
// write_ptr, despite it being atomic.  In deterministic GPU thread mode,
// things get a bit more complicated:
// - The seen_ptr is written by the GPU thread, and points to what it's already
// processed as much of as possible - in the case of a partial command which
// caused it to stop, not the same as the read ptr.  It's written by the GPU,
// under the lock, and updating the cond.
// - The write_ptr is written by the CPU thread after it copies data from the
// FIFO.  Maybe someday it will be under the lock.  For now, because RunGpuLoop
// polls, it's just atomic.
// - The pp_read_ptr is the CPU preprocessing version of the read_ptr.

static std::atomic<int> s_sync_ticks;
static bool s_syncing_suspended;
static Common::Event s_sync_wakeup_event;

void DoState(PointerWrap& p)
{
  p.DoArray(s_video_buffer, FIFO_SIZE);
  u8* write_ptr = s_video_buffer_write_ptr;
  p.DoPointer(write_ptr, s_video_buffer);
  s_video_buffer_write_ptr = write_ptr;
  p.DoPointer(s_video_buffer_read_ptr, s_video_buffer);
  if (p.mode == PointerWrap::MODE_READ && s_use_deterministic_gpu_thread)
  {
    // We're good and paused, right?
    s_video_buffer_seen_ptr = s_video_buffer_pp_read_ptr = s_video_buffer_read_ptr;
  }

  p.Do(s_sync_ticks);
  p.Do(s_syncing_suspended);
}

void PauseAndLock(bool doLock, bool unpauseOnUnlock)
{
  if (doLock)
  {
    SyncGPU(SyncGPUReason::Other);
    EmulatorState(false);

    const SConfig& param = SConfig::GetInstance();

    if (!param.bCPUThread || s_use_deterministic_gpu_thread)
      return;

    s_gpu_mainloop.WaitYield(std::chrono::milliseconds(100), Host_YieldToUI);
  }
  else
  {
    if (unpauseOnUnlock)
      EmulatorState(true);
  }
}

void Init()
{
  // Padded so that SIMD overreads in the vertex loader are safe
  s_video_buffer = static_cast<u8*>(Common::AllocateMemoryPages(FIFO_SIZE + 4));
  ResetVideoBuffer();
  if (SConfig::GetInstance().bCPUThread)
    s_gpu_mainloop.Prepare();
  s_sync_ticks.store(0);
}

void Shutdown()
{
  if (s_gpu_mainloop.IsRunning())
    PanicAlert("Fifo shutting down while active");

  Common::FreeMemoryPages(s_video_buffer, FIFO_SIZE + 4);
  s_video_buffer = nullptr;
  s_video_buffer_write_ptr = nullptr;
  s_video_buffer_pp_read_ptr = nullptr;
  s_video_buffer_read_ptr = nullptr;
  s_video_buffer_seen_ptr = nullptr;
  s_fifo_aux_write_ptr = nullptr;
  s_fifo_aux_read_ptr = nullptr;
}

// May be executed from any thread, even the graphics thread.
// Created to allow for self shutdown.
void ExitGpuLoop()
{
  // This should break the wait loop in CPU thread
  CommandProcessor::fifo.bFF_GPReadEnable = false;
  FlushGpu();

  // Terminate GPU thread loop
  s_emu_running_state.Set();
  s_gpu_mainloop.Stop(false);
}

void EmulatorState(bool running)
{
  s_emu_running_state.Set(running);
  if (running)
    s_gpu_mainloop.Wakeup();
  else
    s_gpu_mainloop.AllowSleep();
}

void SyncGPU(SyncGPUReason reason, bool may_move_read_ptr)
{
  if (s_use_deterministic_gpu_thread)
  {
    s_gpu_mainloop.Wait();
    if (!s_gpu_mainloop.IsRunning())
      return;

    // Opportunistically reset FIFOs so we don't wrap around.
    if (may_move_read_ptr && s_fifo_aux_write_ptr != s_fifo_aux_read_ptr)
      PanicAlert("aux fifo not synced (%p, %p)", s_fifo_aux_write_ptr, s_fifo_aux_read_ptr);

    memmove(s_fifo_aux_data, s_fifo_aux_read_ptr, s_fifo_aux_write_ptr - s_fifo_aux_read_ptr);
    s_fifo_aux_write_ptr -= (s_fifo_aux_read_ptr - s_fifo_aux_data);
    s_fifo_aux_read_ptr = s_fifo_aux_data;

    if (may_move_read_ptr)
    {
      u8* write_ptr = s_video_buffer_write_ptr;

      // what's left over in the buffer
      size_t size = write_ptr - s_video_buffer_pp_read_ptr;

      memmove(s_video_buffer, s_video_buffer_pp_read_ptr, size);
      // This change always decreases the pointers.  We write seen_ptr
      // after write_ptr here, and read it before in RunGpuLoop, so
      // 'write_ptr > seen_ptr' there cannot become spuriously true.
      s_video_buffer_write_ptr = write_ptr = s_video_buffer + size;
      s_video_buffer_pp_read_ptr = s_video_buffer;
      s_video_buffer_read_ptr = s_video_buffer;
      s_video_buffer_seen_ptr = write_ptr;
    }
  }
}

void PushFifoAuxBuffer(void* ptr, size_t size)
{
  if (size > (size_t)(s_fifo_aux_data + FIFO_SIZE - s_fifo_aux_write_ptr))
  {
    SyncGPU(SyncGPUReason::AuxSpace, /* may_move_read_ptr */ false);
    if (!s_gpu_mainloop.IsRunning())
    {
      // GPU is shutting down
      return;
    }
    if (size > (size_t)(s_fifo_aux_data + FIFO_SIZE - s_fifo_aux_write_ptr))
    {
      // That will sync us up to the last 32 bytes, so this short region
      // of FIFO would have to point to a 2MB display list or something.
      PanicAlert("absurdly large aux buffer");
      return;
    }
  }
  memcpy(s_fifo_aux_write_ptr, ptr, size);
  s_fifo_aux_write_ptr += size;
}

void* PopFifoAuxBuffer(size_t size)
{
  void* ret = s_fifo_aux_read_ptr;
  s_fifo_aux_read_ptr += size;
  return ret;
}

// Description: RunGpuLoop() sends data through this function.
static void ReadDataFromFifo(u32 readPtr)
{
  size_t len = 32;
  if (len > (size_t)(s_video_buffer + FIFO_SIZE - s_video_buffer_write_ptr))
  {
    size_t existing_len = s_video_buffer_write_ptr - s_video_buffer_read_ptr;
    if (len > (size_t)(FIFO_SIZE - existing_len))
    {
      PanicAlert("FIFO out of bounds (existing %zu + new %zu > %lu)", existing_len, len,
                 (unsigned long)FIFO_SIZE);
      return;
    }
    memmove(s_video_buffer, s_video_buffer_read_ptr, existing_len);
    s_video_buffer_write_ptr = s_video_buffer + existing_len;
    s_video_buffer_read_ptr = s_video_buffer;
  }
  // Copy new video instructions to s_video_buffer for future use in rendering the new picture
  Memory::CopyFromEmu(s_video_buffer_write_ptr, readPtr, len);
  s_video_buffer_write_ptr += len;
}

// The deterministic_gpu_thread version.
static void ReadDataFromFifoOnCPU(u32 readPtr)
{
  size_t len = 32;
  u8* write_ptr = s_video_buffer_write_ptr;
  if (len > (size_t)(s_video_buffer + FIFO_SIZE - write_ptr))
  {
    // We can't wrap around while the GPU is working on the data.
    // This should be very rare due to the reset in SyncGPU.
    SyncGPU(SyncGPUReason::Wraparound);
    if (!s_gpu_mainloop.IsRunning())
    {
      // GPU is shutting down, so the next asserts may fail
      return;
    }

    if (s_video_buffer_pp_read_ptr != s_video_buffer_read_ptr)
    {
      PanicAlert("desynced read pointers");
      return;
    }
    write_ptr = s_video_buffer_write_ptr;
    size_t existing_len = write_ptr - s_video_buffer_pp_read_ptr;
    if (len > (size_t)(FIFO_SIZE - existing_len))
    {
      PanicAlert("FIFO out of bounds (existing %zu + new %zu > %lu)", existing_len, len,
                 (unsigned long)FIFO_SIZE);
      return;
    }
  }
  Memory::CopyFromEmu(s_video_buffer_write_ptr, readPtr, len);
  s_video_buffer_pp_read_ptr = OpcodeDecoder::Run<true>(
      DataReader(s_video_buffer_pp_read_ptr, write_ptr + len), nullptr, false);
  // This would have to be locked if the GPU thread didn't spin.
  s_video_buffer_write_ptr = write_ptr + len;
}

void ResetVideoBuffer()
{
  s_video_buffer_read_ptr = s_video_buffer;
  s_video_buffer_write_ptr = s_video_buffer;
  s_video_buffer_seen_ptr = s_video_buffer;
  s_video_buffer_pp_read_ptr = s_video_buffer;
  s_fifo_aux_write_ptr = s_fifo_aux_data;
  s_fifo_aux_read_ptr = s_fifo_aux_data;
}

// Description: Main FIFO update loop
// Purpose: Keep the Core HW updated about the CPU-GPU distance
void RunGpuLoop()
{
  AsyncRequests::GetInstance()->SetEnable(true);
  AsyncRequests::GetInstance()->SetPassthrough(false);

  s_gpu_mainloop.Run(
      [] {
        const SConfig& param = SConfig::GetInstance();

        g_video_backend->PeekMessages();

        // Do nothing while paused
        if (!s_emu_running_state.IsSet())
          return;

        if (s_use_deterministic_gpu_thread)
        {
          AsyncRequests::GetInstance()->PullEvents();

          // All the fifo/CP stuff is on the CPU.  We just need to run the opcode decoder.
          u8* seen_ptr = s_video_buffer_seen_ptr;
          u8* write_ptr = s_video_buffer_write_ptr;
          // See comment in SyncGPU
          if (write_ptr > seen_ptr)
          {
            s_video_buffer_read_ptr =
                OpcodeDecoder::Run(DataReader(s_video_buffer_read_ptr, write_ptr), nullptr, false);
            s_video_buffer_seen_ptr = write_ptr;
          }
        }
        else
        {
          SCPFifoStruct& fifo = CommandProcessor::fifo;

          AsyncRequests::GetInstance()->PullEvents();

          CommandProcessor::SetCPStatusFromGPU();

          // check if we are able to run this buffer
          while (!CommandProcessor::IsInterruptWaiting() && fifo.bFF_GPReadEnable &&
                 fifo.CPReadWriteDistance && !AtBreakpoint())
          {
            if (param.bSyncGPU && s_sync_ticks.load() < param.iSyncGpuMinDistance)
              break;

            u32 cyclesExecuted = 0;
            u32 readPtr = fifo.CPReadPointer;
            ReadDataFromFifo(readPtr);

            if (readPtr == fifo.CPEnd)
              readPtr = fifo.CPBase;
            else
              readPtr += 32;

            _assert_msg_(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0,
                         "Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce "
                         "instability in the game. Please report it.",
                         fifo.CPReadWriteDistance - 32);

            u8* write_ptr = s_video_buffer_write_ptr;
            s_video_buffer_read_ptr = OpcodeDecoder::Run(
                DataReader(s_video_buffer_read_ptr, write_ptr), &cyclesExecuted, false);

            Common::AtomicStore(fifo.CPReadPointer, readPtr);
            Common::AtomicAdd(fifo.CPReadWriteDistance, -32);
            if ((write_ptr - s_video_buffer_read_ptr) == 0)
              Common::AtomicStore(fifo.SafeCPReadPointer, fifo.CPReadPointer);

            CommandProcessor::SetCPStatusFromGPU();

            if (param.bSyncGPU)
            {
              cyclesExecuted = (int)(cyclesExecuted / param.fSyncGpuOverclock);
              int old = s_sync_ticks.fetch_sub(cyclesExecuted);
              if (old >= param.iSyncGpuMaxDistance &&
                  old - (int)cyclesExecuted < param.iSyncGpuMaxDistance)
                s_sync_wakeup_event.Set();
            }

            // This call is pretty important in DualCore mode and must be called in the FIFO Loop.
            // If we don't, s_swapRequested or s_efbAccessRequested won't be set to false
            // leading the CPU thread to wait in Video_BeginField or Video_AccessEFB thus slowing
            // things down.
            AsyncRequests::GetInstance()->PullEvents();
          }

          // fast skip remaining GPU time if fifo is empty
          if (s_sync_ticks.load() > 0)
          {
            int old = s_sync_ticks.exchange(0);
            if (old >= param.iSyncGpuMaxDistance)
              s_sync_wakeup_event.Set();
          }

          // The fifo is empty and it's unlikely we will get any more work in the near future.
          // Make sure VertexManager finishes drawing any primitives it has stored in its buffer.
          g_vertex_manager->Flush();
        }
      },
      100);

  AsyncRequests::GetInstance()->SetEnable(false);
  AsyncRequests::GetInstance()->SetPassthrough(true);
}

void FlushGpu()
{
  const SConfig& param = SConfig::GetInstance();

  if (!param.bCPUThread || s_use_deterministic_gpu_thread)
    return;

  s_gpu_mainloop.Wait();
}

void GpuMaySleep()
{
  s_gpu_mainloop.AllowSleep();
}

bool AtBreakpoint()
{
  SCPFifoStruct& fifo = CommandProcessor::fifo;
  return fifo.bFF_BPEnable && (fifo.CPReadPointer == fifo.CPBreakpoint);
}

void RunGpu()
{
  const SConfig& param = SConfig::GetInstance();

  // wake up GPU thread
  if (param.bCPUThread && !s_use_deterministic_gpu_thread)
  {
    s_gpu_mainloop.Wakeup();
  }

  // if the sync GPU callback is suspended, wake it up.
  if (!SConfig::GetInstance().bCPUThread || s_use_deterministic_gpu_thread ||
      SConfig::GetInstance().bSyncGPU)
  {
    if (s_syncing_suspended)
    {
      s_syncing_suspended = false;
      CoreTiming::ScheduleEvent(GPU_TIME_SLOT_SIZE, s_event_sync_gpu, GPU_TIME_SLOT_SIZE);
    }
  }
}

static int RunGpuOnCpu(int ticks)
{
  SCPFifoStruct& fifo = CommandProcessor::fifo;
  bool reset_simd_state = false;
  int available_ticks = int(ticks * SConfig::GetInstance().fSyncGpuOverclock) + s_sync_ticks.load();
  while (fifo.bFF_GPReadEnable && fifo.CPReadWriteDistance && !AtBreakpoint() &&
         available_ticks >= 0)
  {
    if (s_use_deterministic_gpu_thread)
    {
      ReadDataFromFifoOnCPU(fifo.CPReadPointer);
      s_gpu_mainloop.Wakeup();
    }
    else
    {
      if (!reset_simd_state)
      {
        FPURoundMode::SaveSIMDState();
        FPURoundMode::LoadDefaultSIMDState();
        reset_simd_state = true;
      }
      ReadDataFromFifo(fifo.CPReadPointer);
      u32 cycles = 0;
      s_video_buffer_read_ptr = OpcodeDecoder::Run(
          DataReader(s_video_buffer_read_ptr, s_video_buffer_write_ptr), &cycles, false);
      available_ticks -= cycles;
    }

    if (fifo.CPReadPointer == fifo.CPEnd)
      fifo.CPReadPointer = fifo.CPBase;
    else
      fifo.CPReadPointer += 32;

    fifo.CPReadWriteDistance -= 32;
  }

  CommandProcessor::SetCPStatusFromGPU();

  if (reset_simd_state)
  {
    FPURoundMode::LoadSIMDState();
  }

  // Discard all available ticks as there is nothing to do any more.
  s_sync_ticks.store(std::min(available_ticks, 0));

  // If the GPU is idle, drop the handler.
  if (available_ticks >= 0)
    return -1;

  // Always wait at least for GPU_TIME_SLOT_SIZE cycles.
  return -available_ticks + GPU_TIME_SLOT_SIZE;
}

void UpdateWantDeterminism(bool want)
{
  // We are paused (or not running at all yet), so
  // it should be safe to change this.
  const SConfig& param = SConfig::GetInstance();
  bool gpu_thread = false;
  switch (param.m_GPUDeterminismMode)
  {
  case GPU_DETERMINISM_AUTO:
    gpu_thread = want;

    // Hack: For now movies are an exception to this being on (but not
    // to wanting determinism in general).  Once vertex arrays are
    // fixed, there should be no reason to want this off for movies by
    // default, so this can be removed.
    if (!NetPlay::IsNetPlayRunning())
      gpu_thread = false;

    break;
  case GPU_DETERMINISM_NONE:
    gpu_thread = false;
    break;
  case GPU_DETERMINISM_FAKE_COMPLETION:
    gpu_thread = true;
    break;
  }

  gpu_thread = gpu_thread && param.bCPUThread;

  if (s_use_deterministic_gpu_thread != gpu_thread)
  {
    s_use_deterministic_gpu_thread = gpu_thread;
    if (gpu_thread)
    {
      // These haven't been updated in non-deterministic mode.
      s_video_buffer_seen_ptr = s_video_buffer_pp_read_ptr = s_video_buffer_read_ptr;
      CopyPreprocessCPStateFromMain();
      VertexLoaderManager::MarkAllDirty();
    }
  }
}

bool UseDeterministicGPUThread()
{
  return s_use_deterministic_gpu_thread;
}

/* This function checks the emulated CPU - GPU distance and may wake up the GPU,
 * or block the CPU if required. It should be called by the CPU thread regularly.
 * @ticks The gone emulated CPU time.
 * @return A good time to call WaitForGpuThread() next.
 */
static int WaitForGpuThread(int ticks)
{
  const SConfig& param = SConfig::GetInstance();

  int old = s_sync_ticks.fetch_add(ticks);
  int now = old + ticks;

  // GPU is idle, so stop polling.
  if (old >= 0 && s_gpu_mainloop.IsDone())
    return -1;

  // Wakeup GPU
  if (old < param.iSyncGpuMinDistance && now >= param.iSyncGpuMinDistance)
    RunGpu();

  // If the GPU is still sleeping, wait for a longer time
  if (now < param.iSyncGpuMinDistance)
    return GPU_TIME_SLOT_SIZE + param.iSyncGpuMinDistance - now;

  // Wait for GPU
  if (now >= param.iSyncGpuMaxDistance)
    s_sync_wakeup_event.Wait();

  return GPU_TIME_SLOT_SIZE;
}

static void SyncGPUCallback(u64 ticks, s64 cyclesLate)
{
  ticks += cyclesLate;
  int next = -1;

  if (!SConfig::GetInstance().bCPUThread || s_use_deterministic_gpu_thread)
  {
    next = RunGpuOnCpu((int)ticks);
  }
  else if (SConfig::GetInstance().bSyncGPU)
  {
    next = WaitForGpuThread((int)ticks);
  }

  s_syncing_suspended = next < 0;
  if (!s_syncing_suspended)
    CoreTiming::ScheduleEvent(next, s_event_sync_gpu, next);
}

// Initialize GPU - CPU thread syncing, this gives us a deterministic way to start the GPU thread.
void Prepare()
{
  s_event_sync_gpu = CoreTiming::RegisterEvent("SyncGPUCallback", SyncGPUCallback);
  s_syncing_suspended = true;
}
}