// Store (or refresh) the content digest for `filename` in the digest cache.
//
// Takes the cache's write lock for the whole operation. If a record for
// (hash, filename) already exists its payload is refreshed in place;
// otherwise a new record is allocated from the cache's linear allocator
// and inserted into the hash table.
//
// `hash` is the caller-computed hash of `filename`; `timestamp` is the
// file timestamp associated with `digest`. `m_AccessTime` is stamped on
// the record — presumably used elsewhere for cache aging/trimming (not
// visible from this function).
void DigestCacheSet(DigestCache* self, const char* filename, uint32_t hash, uint64_t timestamp, const HashDigest& digest)
{
  ReadWriteLockWrite(&self->m_Lock);

  DigestCacheRecord* rec = (DigestCacheRecord*) HashTableLookup(&self->m_Table, hash, filename);

  if (nullptr != rec)
  {
    // Existing entry: just refresh the mutable payload.
    rec->m_Timestamp = timestamp;
    rec->m_ContentDigest = digest;
    rec->m_AccessTime = self->m_AccessTime;
  }
  else
  {
    // New entry: carve a record out of the linear allocator, duplicate the
    // filename into cache-owned storage, then link it into the table.
    rec = LinearAllocate<DigestCacheRecord>(&self->m_Allocator);
    rec->m_Hash = hash;
    rec->m_ContentDigest = digest;
    rec->m_Next = nullptr;
    rec->m_String = StrDup(&self->m_Allocator, filename);
    rec->m_Timestamp = timestamp;
    rec->m_AccessTime = self->m_AccessTime;
    HashTableInsert(&self->m_Table, rec);
  }

  ReadWriteUnlockWrite(&self->m_Lock);
}
void ScanCacheInsert( ScanCache* self, const HashDigest& key, uint64_t timestamp, const char** included_files, int count) { AtomicIncrement(&g_Stats.m_ScanCacheInserts); ReadWriteLockWrite(&self->m_Lock); ScanCache::Record* record = LookupDynamic(self, key); // See if we have this record already (races to insert same include set are possible) if (nullptr == record || record->m_FileTimestamp != timestamp) { // Make sure we have room to insert. ScanCachePrepareInsert(self); uint32_t table_size = self->m_TableSize; #if ENABLED(USE_SHA1_HASH) uint32_t hash = key.m_Words.m_C; #elif ENABLED(USE_FAST_HASH) uint32_t hash = key.m_Words32[0]; #endif uint32_t index = hash &(table_size - 1); // Allocate a new record if needed const bool is_fresh = record == nullptr; if (is_fresh) { record = LinearAllocate<ScanCache::Record>(self->m_Allocator); record->m_Key = key; } record->m_FileTimestamp = timestamp; record->m_IncludeCount = count; record->m_Includes = LinearAllocateArray<FileAndHash>(self->m_Allocator, count); for (int i = 0; i < count; ++i) { record->m_Includes[i].m_Filename = StrDup(self->m_Allocator, included_files[i]); record->m_Includes[i].m_Hash = Djb2HashPath(included_files[i]); } if (is_fresh) { record->m_Next = self->m_Table[index]; self->m_Table[index] = record; self->m_RecordCount++; } } ReadWriteUnlockWrite(&self->m_Lock); }