Example No. 1
    bool for_each(std::vector<Simulator*>& simulators, Time time) noexcept
    {
        m_jobs = &simulators;
        m_time = time;

        auto sz = (simulators.size() / m_block_size) +
                  ((simulators.size() % m_block_size) ? 1 : 0);

        m_block_count.store(sz, std::memory_order_relaxed);
        m_block_id.store(sz, std::memory_order_relaxed);

        for (;;) {
            auto block = m_block_id.fetch_sub(1, std::memory_order_relaxed);

            if (block < 0)
                break;

            std::size_t begin = block * m_block_size;
            std::size_t begin_plus_b = begin + m_block_size;
            std::size_t end = std::min(m_jobs->size(), begin_plus_b);

            for (; begin < end; ++begin)
                simulator_process((*m_jobs)[begin], m_time);

            m_block_count.fetch_sub(1, std::memory_order_relaxed);
        }

        while (m_block_count.load(std::memory_order_relaxed) >= 0)
            std::this_thread::sleep_for(std::chrono::nanoseconds(1));

        m_jobs = nullptr;

        return true;
    }
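
The function above distributes fixed-size blocks of the job vector by counting an atomic block index down with fetch_sub and tracks completion with a second counter. Below is a minimal, self-contained sketch of the same idea, assuming a plain std::vector<int> of jobs and a stand-in process() function; every name in it is illustrative rather than taken from the project above.

    #include <algorithm>
    #include <atomic>
    #include <cstddef>
    #include <cstdio>
    #include <thread>
    #include <vector>

    static void process(int& job) { job += 1; } // stand-in for simulator_process

    int main()
    {
        std::vector<int> jobs(1000, 0);
        const std::ptrdiff_t block_size = 64;

        // Ceil-division: number of blocks needed to cover every job.
        const std::ptrdiff_t blocks =
            static_cast<std::ptrdiff_t>((jobs.size() + block_size - 1) / block_size);

        std::atomic<std::ptrdiff_t> next_block{blocks - 1}; // hand out the highest index first
        std::atomic<std::ptrdiff_t> remaining{blocks};      // completion counter

        auto worker = [&] {
            for (;;) {
                const auto block = next_block.fetch_sub(1, std::memory_order_relaxed);
                if (block < 0)
                    break; // every block has been claimed

                const std::size_t begin = static_cast<std::size_t>(block) * block_size;
                const std::size_t end = std::min(jobs.size(), begin + block_size);
                for (std::size_t i = begin; i < end; ++i)
                    process(jobs[i]);

                remaining.fetch_sub(1, std::memory_order_relaxed);
            }
        };

        std::thread t1(worker), t2(worker);
        worker(); // the calling thread helps, as for_each() does above
        t1.join();
        t2.join();

        // Relaxed ordering is enough here only because join() already
        // synchronizes; the original instead spins on m_block_count.
        std::printf("remaining blocks: %td\n", remaining.load());
    }
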
Example No. 2
Socket* SocketPool::GetSocket() {
  for ( ; ; ) {
    Socket* sock = nullptr;
    {
      std::unique_lock<std::mutex> lock(mtx_);
      if (pool_.empty()) {
        break;
      }

      sock = pool_.back();
      pool_.pop_back();
    }

    pool_count_.fetch_sub(1, std::memory_order_relaxed);

    if (sock && sock->IsValid()) {
      return sock;
    }

    delete sock;
  }

  if (pool_count_.load(std::memory_order_relaxed) < pool_size_) {
    pool_count_.fetch_add(1, std::memory_order_relaxed);
    return new Socket(remote_side_.ip, remote_side_.port);
  }

  return nullptr;
}
Example No. 3
  int Pop()
  {
      while (count.load(std::memory_order_acquire) > 1)
      {
          int head1 = head.load(std::memory_order_acquire);
          int next1 = array[head1].Next.exchange(-1, std::memory_order_seq_cst);

          if (next1 >= 0)
          {
              int head2 = head1;
              if (head.compare_exchange_strong(head2, next1, std::memory_order_seq_cst))
              {
                  count.fetch_sub(1, std::memory_order_seq_cst);
                  return head1;
              }
              else
              {
                  array[head1].Next.exchange(next1, std::memory_order_seq_cst);
              }
          }
          else
          {
            sched_yield();
          }
      }

      return -1;
  }
Example No. 4
 void release()
 {
     if (1 == rc.fetch_sub(1, rl::memory_order_release))
     {
         rc.load(rl::memory_order_acquire);
         data($) = 0;
         delete this;
     }
 }
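
The snippet above uses the Relacy race-detector types (the rl:: memory orders and the data($) accessor). As a hedged sketch, the same reference-count release idiom with plain std::atomic could look like the following: the release decrement orders this thread's earlier writes before destruction, and an acquire fence, standing in for the rc.load above, makes the other threads' decrements visible before the object is torn down.

 #include <atomic>

 struct ref_counted
 {
     std::atomic<int> rc{1};
     int data{42};

     void add_ref() { rc.fetch_add(1, std::memory_order_relaxed); }

     void release()
     {
         if (1 == rc.fetch_sub(1, std::memory_order_release))
         {
             // Pair the release decrements with an acquire fence before
             // touching (and freeing) the object. Assumes the object was
             // allocated with new.
             std::atomic_thread_fence(std::memory_order_acquire);
             data = 0;
             delete this;
         }
     }
 };
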
Example No. 5
void
my_callback(
    context_t::caller_request_t* request)
{
    // When using the callback, it is the callback's job to delete the
    // request, freeing all memory allocated for that request. This is done
    // because the library doesn't know how long the caller will need to
    // keep the request around.
    delete request;
    COUNT.fetch_sub(1);
}
Example No. 6
void consume_queue_items()
{
    while (true) {
        int item_index;

        if ((item_index = count.fetch_sub(1, std::memory_order_acquire)) <= 0) {
            wait_for_more_items();
            continue;
        }

        process(queue_data[item_index - 1]);
    }
}
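
The function above is the consumer half of a release/acquire hand-off. A minimal sketch of the producer half it would typically pair with follows; populate_queue and the item count are assumptions for illustration, while queue_data and count are the names already used above.

std::vector<int> queue_data;
std::atomic<int> count{0};

void populate_queue()
{
    const int number_of_items = 20;
    queue_data.clear();
    for (int i = 0; i < number_of_items; ++i)
        queue_data.push_back(i);

    // The release store pairs with the acquire fetch_sub in
    // consume_queue_items(): a consumer that reads a positive count also
    // sees the corresponding elements of queue_data.
    count.store(number_of_items, std::memory_order_release);
}
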
Example No. 7
	static bool GetSharedTask(task_entry & out)
	{
		uint32_t start = Scheduler->threadId;
		uint32_t id = start;
		do
		{
			if (SchedulerList[id].sharedTasks.pop_front(out))
			{
				GlobalSharedTaskCount.fetch_sub(1, std::memory_order_relaxed);
				return true;
			}
			id = (id + 1) < ThreadCount ? (id + 1) : 0;
		} while (id != start);
		return false;
	}
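
A hedged sketch of the producer side such a scheduler might pair with GetSharedTask(): the task goes into the current thread's shared queue and the global counter is bumped. PushSharedTask and the push_back member are assumptions; only the names visible in the excerpt are taken from it.

	static void PushSharedTask(const task_entry & task)
	{
		// push_back is assumed to be the counterpart of the pop_front
		// used in GetSharedTask()
		SchedulerList[Scheduler->threadId].sharedTasks.push_back(task);
		GlobalSharedTaskCount.fetch_add(1, std::memory_order_relaxed);
	}
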
Example No. 8
    void run()
    {
        while (m_running_flag.load(std::memory_order_relaxed)) {
            auto block = m_block_id.fetch_sub(1, std::memory_order_relaxed);

            if (block >= 0) {
                std::size_t begin = block * m_block_size;
                std::size_t begin_plus_b = begin + m_block_size;
                std::size_t end = std::min(m_jobs->size(), begin_plus_b);

                for (; begin < end; ++begin)
                    simulator_process((*m_jobs)[begin], m_time);

                m_block_count.fetch_sub(1, std::memory_order_relaxed);
            } else {
                //
                // TODO: Maybe we can use a yield instead of this
                // sleep_for function to reduce the overhead of the
                // current thread.
                //
                std::this_thread::sleep_for(std::chrono::nanoseconds(1));
            }
        }
    }
Example No. 9
RegionDesc::Block::Block(const Func* func,
                         bool        resumed,
                         Offset      start,
                         int         length,
                         FPInvOffset initSpOff)
  : m_id(s_nextId.fetch_sub(1, std::memory_order_relaxed))
  , m_func(func)
  , m_resumed(resumed)
  , m_start(start)
  , m_last(kInvalidOffset)
  , m_length(length)
  , m_initialSpOffset(initSpOff)
  , m_profTransID(kInvalidTransID)
{
  assertx(length >= 0);
  if (length > 0) {
    SrcKey sk(func, start, resumed);
    for (unsigned i = 1; i < length; ++i) sk.advance();
    m_last = sk.offset();
  }
  checkInstructions();
  checkMetadata();
}
Example No. 10
 ~MemoryCheck()
 {
     fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed);
 };
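
A hedged guess at the rest of this RAII helper: the constructor presumably adds the same byte count the destructor subtracts. Everything except the destructor body is an assumption.

 #include <atomic>
 #include <cstddef>

 static std::atomic<std::size_t> fake_allocated_memory{0};

 struct MemoryCheck
 {
     std::size_t b;

     explicit MemoryCheck(std::size_t bytes) : b(bytes)
     {
         fake_allocated_memory.fetch_add(b, std::memory_order_relaxed);
     }

     ~MemoryCheck()
     {
         fake_allocated_memory.fetch_sub(b, std::memory_order_relaxed);
     }
 };
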
Example No. 11
void HiresTexture::Update()
{
  bool BuildMaterialMaps = g_ActiveConfig.bHiresMaterialMapsBuild;
  if (s_prefetcher.joinable())
  {
    s_textureCacheAbortLoading.Set();
    s_prefetcher.join();
  }

  if (!g_ActiveConfig.bHiresTextures)
  {
    s_textureMap.clear();
    s_textureCache.clear();
    size_sum.store(0);
    return;
  }

  if (!g_ActiveConfig.bCacheHiresTextures)
  {
    s_textureCache.clear();
    size_sum.store(0);
  }

  s_textureMap.clear();
  const std::string& game_id = SConfig::GetInstance().GetGameID();
  const std::string texture_directory = GetTextureDirectory(game_id);

  std::string ddscode(".dds");
  std::string cddscode(".DDS");
  std::vector<std::string> Extensions;
  Extensions.push_back(".png");
  if (!BuildMaterialMaps)
  {
    Extensions.push_back(".dds");
  }

  std::vector<std::string> filenames =
      Common::DoFileSearch({texture_directory}, Extensions, /*recursive*/ true);

  const std::string miptag = "mip";

  for (const std::string& fileitem : filenames)
  {
    std::string filename;
    std::string Extension;
    SplitPath(fileitem, nullptr, &filename, &Extension);
    if (filename.substr(0, s_format_prefix.length()) != s_format_prefix)
    {
      // Discard wrong files
      continue;
    }
    size_t map_index = 0;
    size_t max_type = BuildMaterialMaps ? MapType::specular : MapType::normal;
    bool arbitrary_mips = false;
    for (size_t tag = 1; tag <= MapType::specular; tag++)
    {
      if (StringEndsWith(filename, s_maps_tags[tag]))
      {
        map_index = tag;
        filename = filename.substr(0, filename.size() - s_maps_tags[tag].size());
        break;
      }
    }
    if (map_index > max_type)
    {
      continue;
    }
    if (BuildMaterialMaps && map_index == MapType::material)
    {
      continue;
    }
    else if (!BuildMaterialMaps && map_index == MapType::color)
    {
      const size_t arb_index = filename.rfind("_arb");
      arbitrary_mips = arb_index != std::string::npos;
      if (arbitrary_mips)
        filename.erase(arb_index, 4);
    }
    const bool is_compressed = Extension.compare(ddscode) == 0 || Extension.compare(cddscode) == 0;
    hires_mip_level mip_level_detail(fileitem, Extension, is_compressed);
    u32 level = 0;
    size_t idx = filename.find_last_of('_');
    std::string miplevel = filename.substr(idx + 1, std::string::npos);
    if (miplevel.substr(0, miptag.length()) == miptag)
    {
      sscanf(miplevel.substr(3, std::string::npos).c_str(), "%i", &level);
      filename = filename.substr(0, idx);
    }
    HiresTextureCache::iterator iter = s_textureMap.find(filename);
    u32 min_item_size = level + 1;
    if (iter == s_textureMap.end())
    {
      HiresTextureCacheItem item(min_item_size);
      if (arbitrary_mips)
      {
        item.has_arbitrary_mips = true;
      }
      item.maps[map_index].resize(min_item_size);
      std::vector<hires_mip_level>& dst = item.maps[map_index];
      dst[level] = mip_level_detail;
      s_textureMap.emplace(filename, item);
    }
    else
    {
      std::vector<hires_mip_level>& dst = iter->second.maps[map_index];
      if (arbitrary_mips)
      {
        iter->second.has_arbitrary_mips = true;
      }
      if (dst.size() < min_item_size)
      {
        dst.resize(min_item_size);
      }
      dst[level] = mip_level_detail;
    }
  }

  if (g_ActiveConfig.bCacheHiresTextures && s_textureMap.size() > 0)
  {
    // remove cached but deleted textures
    auto iter = s_textureCache.begin();
    while (iter != s_textureCache.end())
    {
      if (s_textureMap.find(iter->first) == s_textureMap.end())
      {
        size_sum.fetch_sub(iter->second->m_cached_data_size);
        iter = s_textureCache.erase(iter);
      }
      else
      {
        iter++;
      }
    }
    s_textureCacheAbortLoading.Clear();
    s_prefetcher = std::thread(Prefetch);
    if (g_ActiveConfig.bWaitForCacheHiresTextures && s_prefetcher.joinable())
    {
      s_prefetcher.join();
    }
  }
}
Example No. 12
// Description: Main FIFO update loop
// Purpose: Keep the Core HW updated about the CPU-GPU distance
void RunGpuLoop()
{

	AsyncRequests::GetInstance()->SetEnable(true);
	AsyncRequests::GetInstance()->SetPassthrough(false);

	s_gpu_mainloop.Run(
	[] {
		const SConfig& param = SConfig::GetInstance();

		g_video_backend->PeekMessages();

		// Do nothing while paused
		if (!s_emu_running_state.load())
			return;

		if (g_use_deterministic_gpu_thread)
		{
			AsyncRequests::GetInstance()->PullEvents();

			// All the fifo/CP stuff is on the CPU.  We just need to run the opcode decoder.
			u8* seen_ptr = s_video_buffer_seen_ptr;
			u8* write_ptr = s_video_buffer_write_ptr;
			// See comment in SyncGPU
			if (write_ptr > seen_ptr)
			{
				s_video_buffer_read_ptr = OpcodeDecoder_Run(DataReader(s_video_buffer_read_ptr, write_ptr), nullptr, false);
				s_video_buffer_seen_ptr = write_ptr;
			}
		}
		else
		{
			SCPFifoStruct &fifo = CommandProcessor::fifo;

			AsyncRequests::GetInstance()->PullEvents();

			CommandProcessor::SetCPStatusFromGPU();

			// check if we are able to run this buffer
			while (!CommandProcessor::IsInterruptWaiting() && fifo.bFF_GPReadEnable && fifo.CPReadWriteDistance && !AtBreakpoint())
			{
				if (param.bSyncGPU && s_sync_ticks.load() < param.iSyncGpuMinDistance)
					break;

				u32 cyclesExecuted = 0;
				u32 readPtr = fifo.CPReadPointer;
				ReadDataFromFifo(readPtr);

				if (readPtr == fifo.CPEnd)
					readPtr = fifo.CPBase;
				else
					readPtr += 32;

				_assert_msg_(COMMANDPROCESSOR, (s32)fifo.CPReadWriteDistance - 32 >= 0 ,
					"Negative fifo.CPReadWriteDistance = %i in FIFO Loop !\nThat can produce instability in the game. Please report it.", fifo.CPReadWriteDistance - 32);

				u8* write_ptr = s_video_buffer_write_ptr;
				s_video_buffer_read_ptr = OpcodeDecoder_Run(DataReader(s_video_buffer_read_ptr, write_ptr), &cyclesExecuted, false);

				Common::AtomicStore(fifo.CPReadPointer, readPtr);
				Common::AtomicAdd(fifo.CPReadWriteDistance, -32);
				if ((write_ptr - s_video_buffer_read_ptr) == 0)
					Common::AtomicStore(fifo.SafeCPReadPointer, fifo.CPReadPointer);

				CommandProcessor::SetCPStatusFromGPU();

				if (param.bSyncGPU)
				{
					cyclesExecuted = (int)(cyclesExecuted / param.fSyncGpuOverclock);
					int old = s_sync_ticks.fetch_sub(cyclesExecuted);
					if (old > 0 && old - (int)cyclesExecuted <= 0)
						s_sync_wakeup_event.Set();
				}

				// This call is pretty important in DualCore mode and must be called in the FIFO Loop.
				// If we don't call it, s_swapRequested or s_efbAccessRequested won't be set to false,
				// leaving the CPU thread waiting in Video_BeginField or Video_AccessEFB and thus slowing things down.
				AsyncRequests::GetInstance()->PullEvents();
			}

			// fast skip remaining GPU time if fifo is empty
			if (s_sync_ticks.load() > 0)
			{
				int old = s_sync_ticks.exchange(0);
				if (old > 0)
					s_sync_wakeup_event.Set();
			}

			// The fifo is empty and it's unlikely we will get any more work in the near future.
			// Make sure VertexManager finishes drawing any primitives it has stored in its buffer.
			VertexManager::Flush();
		}
	}, 100);

	AsyncRequests::GetInstance()->SetEnable(false);
	AsyncRequests::GetInstance()->SetPassthrough(true);
}
Example No. 13
 void unlock() noexcept {
     if ( 1 != value_.fetch_sub( 1, std::memory_order_acquire) ) {
         value_.store( 0, std::memory_order_release);
         futex_wake( & value_);
     }
 }
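
The unlock() above follows the well-known three-state futex mutex scheme (0 = unlocked, 1 = locked, 2 = locked with waiters): if the previous value was not 1, someone may be sleeping, so the owner clears the word and wakes a waiter. Below is a hedged sketch of the lock() side that convention implies; value_ is the same atomic member used in unlock(), and futex_wait() is assumed to exist alongside the futex_wake() shown above, with the usual (address, expected value) signature.

 void lock() noexcept {
     std::int32_t c = 0;
     // fast path: 0 -> 1 takes an uncontended lock
     if ( value_.compare_exchange_strong( c, 1, std::memory_order_acquire) ) {
         return;
     }
     // contended path: advertise a waiter by forcing the state to 2, then
     // sleep until the previous owner stores 0 in unlock()
     if ( 2 != c) {
         c = value_.exchange( 2, std::memory_order_acquire);
     }
     while ( 0 != c) {
         futex_wait( & value_, 2);
         c = value_.exchange( 2, std::memory_order_acquire);
     }
 }
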
Example No. 14
int32_t
AXUserEnd()
{
   // TODO: Implement this properly
   return sProtectLock.fetch_sub(1);
}
Example No. 15
void HiresTexture::Update()
{
	s_check_native_format = false;
	s_check_new_format = false;

	if (s_prefetcher.joinable())
	{
		s_textureCacheAbortLoading.Set();
		s_prefetcher.join();
	}

	if (!g_ActiveConfig.bHiresTextures)
	{
		s_textureMap.clear();
		s_textureCache.clear();
		size_sum.store(0);
		return;
	}

	if (!g_ActiveConfig.bCacheHiresTextures)
	{
		s_textureCache.clear();
		size_sum.store(0);
	}
	
	s_textureMap.clear();
	const std::string& gameCode = SConfig::GetInstance().m_strUniqueID;
	std::string szDir = StringFromFormat("%s%s", File::GetUserPath(D_HIRESTEXTURES_IDX).c_str(), gameCode.c_str());	
	std::string ddscode(".dds");
	std::string cddscode(".DDS");
	std::vector<std::string> Extensions = {
		".png",
		".dds"
	};

	auto rFilenames = DoFileSearch(Extensions, { szDir }, /*recursive*/ true);

	const std::string code = StringFromFormat("%s_", gameCode.c_str());
	const std::string miptag = "mip";
	const std::string normaltag = ".nrm";
	for (u32 i = 0; i < rFilenames.size(); i++)
	{
		std::string FileName;
		std::string Extension;
		SplitPath(rFilenames[i], nullptr, &FileName, &Extension);
		if (FileName.substr(0, code.length()) == code)
		{
			s_check_native_format = true;
		}
		else if (FileName.substr(0, s_format_prefix.length()) == s_format_prefix)
		{
			s_check_new_format = true;
		}
		else
		{
			// Discard wrong files
			continue;
		}
		const bool is_compressed = Extension.compare(ddscode) == 0 || Extension.compare(cddscode) == 0;
		const bool is_normal_map = hasEnding(FileName, normaltag);
		if (is_normal_map)
		{
			FileName = FileName.substr(0, FileName.size() - normaltag.size());
		}
		hires_mip_level mip_level_detail(rFilenames[i], Extension, is_compressed);
		u32 level = 0;
		size_t idx = FileName.find_last_of('_');
		std::string miplevel = FileName.substr(idx + 1, std::string::npos);
		if (miplevel.substr(0, miptag.length()) == miptag)
		{
			sscanf(miplevel.substr(3, std::string::npos).c_str(), "%i", &level);
			FileName = FileName.substr(0, idx);
		}
		HiresTextureCache::iterator iter = s_textureMap.find(FileName);
		u32 min_item_size = level + 1;
		if (iter == s_textureMap.end())
		{
			HiresTextureCacheItem item(min_item_size);
			if (is_normal_map)
			{
				item.normal_map.resize(min_item_size);
			}
			std::vector<hires_mip_level> &dst = is_normal_map ? item.normal_map : item.color_map;
			dst[level] = mip_level_detail;
			s_textureMap.emplace(FileName, item);
		}
		else
		{
			std::vector<hires_mip_level> &dst = is_normal_map ? iter->second.normal_map : iter->second.color_map;
			if (dst.size() < min_item_size)
			{
				dst.resize(min_item_size);
			}
			dst[level] = mip_level_detail;
		}
	}

	if (g_ActiveConfig.bCacheHiresTextures && s_textureMap.size() > 0)
	{
		// remove cached but deleted textures
		auto iter = s_textureCache.begin();
		while (iter != s_textureCache.end())
		{
			if (s_textureMap.find(iter->first) == s_textureMap.end())
			{
				size_sum.fetch_sub(iter->second->m_cached_data_size);
				iter = s_textureCache.erase(iter);
			}
			else
			{
				iter++;
			}
		}
		s_textureCacheAbortLoading.Clear();
		s_prefetcher = std::thread(Prefetch);
	}
}
Example No. 16
 count_t decrement_count ()
 {
     return get_count(pair_.fetch_sub(1, std::memory_order_acq_rel));
 }
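
A hedged sketch of the kind of packed count/tag word this accessor suggests: the reference count lives in the low bits of a single atomic so that it can be adjusted together with another small field in one read-modify-write. The layout, get_count(), and increment_count() below are assumptions; only decrement_count() mirrors the excerpt.

 #include <atomic>
 #include <cstdint>

 using count_t = std::uint32_t;

 class counted_pair
 {
     // low 32 bits: reference count; high 32 bits: an index or tag
     std::atomic<std::uint64_t> pair_{1};

     static count_t get_count (std::uint64_t packed)
     {
         return static_cast<count_t>(packed & 0xffffffffu);
     }

 public:
     count_t increment_count ()
     {
         return get_count(pair_.fetch_add(1, std::memory_order_acq_rel)) + 1;
     }

     count_t decrement_count ()
     {
         // fetch_sub returns the packed word before the decrement, so the
         // caller sees the old count, exactly as in the excerpt above
         return get_count(pair_.fetch_sub(1, std::memory_order_acq_rel));
     }
 };
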