Example #1
The simplest case: take the lock in read mode just long enough to get a consistent view of a single counter.
bool ScanCacheDirty(ScanCache* self)
{
  bool result;

  ReadWriteLockRead(&self->m_Lock);

  result = self->m_RecordCount > 0;

  ReadWriteUnlockRead(&self->m_Lock);

  return result;
}
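All three examples go through a small read-write lock wrapper; only the reader side appears above. A minimal sketch of what that API could look like on top of POSIX pthread_rwlock_t follows. The function names ReadWriteLockRead/ReadWriteUnlockRead are taken from the examples; the pthread mapping and the init/destroy/writer-side helpers are assumptions, not the original implementation.

// Hypothetical backing for the lock helpers used in these examples,
// sketched on top of pthread_rwlock_t. The real implementation in the
// source project may differ.
#include <pthread.h>

struct ReadWriteLock
{
  pthread_rwlock_t m_Impl;
};

inline void ReadWriteLockInit(ReadWriteLock* self)    { pthread_rwlock_init(&self->m_Impl, nullptr); }
inline void ReadWriteLockDestroy(ReadWriteLock* self) { pthread_rwlock_destroy(&self->m_Impl); }
inline void ReadWriteLockRead(ReadWriteLock* self)    { pthread_rwlock_rdlock(&self->m_Impl); }
inline void ReadWriteUnlockRead(ReadWriteLock* self)  { pthread_rwlock_unlock(&self->m_Impl); }
inline void ReadWriteLockWrite(ReadWriteLock* self)   { pthread_rwlock_wrlock(&self->m_Impl); }
inline void ReadWriteUnlockWrite(ReadWriteLock* self) { pthread_rwlock_unlock(&self->m_Impl); }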
Example #2
A read-locked hash table lookup. Note the deliberate, benign write to the record's access time while holding only the read lock.
bool DigestCacheGet(DigestCache* self, const char* filename, uint32_t hash, uint64_t timestamp, HashDigest* digest_out)
{
  bool result = false;

  ReadWriteLockRead(&self->m_Lock);

  if (DigestCacheRecord* r = (DigestCacheRecord*) HashTableLookup(&self->m_Table, hash, filename))
  {
    if (r->m_Timestamp == timestamp)
    {
      // Technically violates the r/w lock (a write while holding only the read lock), but it's benign
      r->m_AccessTime = self->m_AccessTime;
      *digest_out     = r->m_ContentDigest;
      result          = true;
    }
  }

  ReadWriteUnlockRead(&self->m_Lock);

  return result;
}
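The comment in Example #2 admits that updating m_AccessTime under a read lock is formally a data race. It is benign here (a single word, no ordering requirements), but if you want the same behavior without a thread sanitizer flagging it, a relaxed atomic store expresses the intent. A sketch, assuming C++11 atomics are acceptable in the codebase; TouchRecord and the reduced struct are illustrative, not from the original:

#include <atomic>
#include <cstdint>

// Hypothetical variant of the record: only the access-time field changes
// type, from a plain integer to an atomic.
struct DigestCacheRecord
{
  std::atomic<uint64_t> m_AccessTime;  // was: uint64_t m_AccessTime
  uint64_t              m_Timestamp;   // assumed plain field, as used in the lookup
};

// Inside the read-locked section, the update becomes an explicit relaxed
// store. Relaxed ordering is enough: nothing else is published through
// this write, it only has to land eventually.
static void TouchRecord(DigestCacheRecord* r, uint64_t now)
{
  r->m_AccessTime.store(now, std::memory_order_relaxed);
}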
Example #3
A two-level lookup: first a lock-free binary search of the immutable frozen data, then, on a miss, a read-locked probe of the dynamic per-session table.
bool ScanCacheLookup(ScanCache* self, const HashDigest& key, uint64_t timestamp, ScanCacheLookupResult* result_out, MemAllocLinear* scratch)
{
  bool success = false;

  // First check previously cached (frozen) data. No lock is needed for this
  // as it is purely read-only.
  //
  // We expect most lookups to hit this path, as header files don't change
  // that frequently.
  const ScanData* scan_data = self->m_FrozenData;

  if (scan_data)
  {
    const int32_t count = scan_data->m_EntryCount;

    if (const HashDigest* ptr = BinarySearch(scan_data->m_Keys.Get(), count, key))
    {
      int                   index      = int(ptr - scan_data->m_Keys.Get());
      const ScanCacheEntry *entry      = scan_data->m_Data.Get() + index;

      if (entry->m_FileTimestamp == timestamp)
      {
        int                   file_count = entry->m_IncludedFiles.GetCount();

        FileAndHash *output = LinearAllocateArray<FileAndHash>(scratch, file_count);

        for (int i = 0; i < file_count; ++i)
        {
          output[i].m_Filename = entry->m_IncludedFiles[i].m_Filename;
          output[i].m_Hash     = entry->m_IncludedFiles[i].m_Hash;
        }

        result_out->m_IncludedFileCount = file_count;
        result_out->m_IncludedFiles     = output;
        success                         = true;

        // Flag this frozen record as having been accessed, so we don't throw it
        // away due to timing out. This is technically a race, but we trust CPUs
        // to sort out the cache line sharing via their cache coherency model.
        self->m_FrozenAccess[index]     = 1;

        AtomicIncrement(&g_Stats.m_OldScanCacheHits);
      }
    }
  }

  if (!success)
  {
    // Consult dynamic state for this session.
    result_out->m_IncludedFileCount = 0;
    result_out->m_IncludedFiles     = nullptr;

    ReadWriteLockRead(&self->m_Lock);

    if (ScanCache::Record* record = LookupDynamic(self, key))
    {
      if (record->m_FileTimestamp == timestamp)
      {
        result_out->m_IncludedFileCount = record->m_IncludeCount;
        result_out->m_IncludedFiles     = record->m_Includes;
        success                         = true;
      }
    }

    ReadWriteUnlockRead(&self->m_Lock);

    if (success)
    {
      AtomicIncrement(&g_Stats.m_NewScanCacheHits);
    }
  }

  if (!success)
  {
    AtomicIncrement(&g_Stats.m_ScanCacheMisses);
  }

  return success;
}
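Example #3 leans on a BinarySearch helper that returns a pointer to the matching key in the frozen, sorted key array, or null on a miss. The real helper isn't shown; a plausible sketch in terms of std::lower_bound, assuming the array is sorted and the element type is ordered via operator<:

#include <algorithm>

// Plausible shape of the BinarySearch helper used in Example #3: returns a
// pointer to the element equal to key, or nullptr if it is not present.
template <typename T>
const T* BinarySearch(const T* elems, int count, const T& key)
{
  const T* end = elems + count;
  const T* it  = std::lower_bound(elems, end, key);
  return (it != end && !(key < *it)) ? it : nullptr;
}

Returning a pointer instead of an index is what lets the caller recover the index with plain pointer arithmetic (ptr - scan_data->m_Keys.Get()), as the example does.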