Example #1
S64 LLTextureCache::initCache(ELLPath location, S64 max_size, BOOL read_only)
{
	mReadOnly = read_only;
	
	S64 header_size = (max_size * 2) / 10;
	S64 max_entries = header_size / TEXTURE_CACHE_ENTRY_SIZE;
	sCacheMaxEntries = (S32)(llmin((S64)sCacheMaxEntries, max_entries));
	header_size = sCacheMaxEntries * TEXTURE_CACHE_ENTRY_SIZE;
	max_size -= header_size;
	if (sCacheMaxTexturesSize > 0)
		sCacheMaxTexturesSize = llmin(sCacheMaxTexturesSize, max_size);
	else
		sCacheMaxTexturesSize = max_size;
	max_size -= sCacheMaxTexturesSize;
	
	LL_INFOS("TextureCache") << "Headers: " << sCacheMaxEntries
			<< " Textures size: " << sCacheMaxTexturesSize/(1024*1024) << " MB" << LL_ENDL;

	setDirNames(location);
	
	if (!mReadOnly)
	{
		LLFile::mkdir(mTexturesDirName);
		const char* subdirs = "0123456789abcdef";
		for (S32 i=0; i<16; i++)
		{
			std::string dirname = mTexturesDirName + gDirUtilp->getDirDelimiter() + subdirs[i];
			LLFile::mkdir(dirname);
		}
	}
	readHeaderCache();
	purgeTextures(true); // calc mTexturesSize and make some room in the texture cache if we need it

	return max_size; // unused cache space
}
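
The budget split in initCache() can be reproduced in isolation. Below is a minimal standalone sketch of the same arithmetic, assuming a 1 GB budget, a 1 KB entry size, and a pre-set texture-size cap; all three values are illustrative stand-ins, not the viewer's actual TEXTURE_CACHE_ENTRY_SIZE or limits.

// Standalone sketch of the initCache() size arithmetic; constants are assumptions.
#include <algorithm>
#include <cstdint>
#include <iostream>

int main()
{
	const int64_t kEntrySize     = 1024;                        // stand-in for TEXTURE_CACHE_ENTRY_SIZE
	const int64_t kMaxEntriesCap = 100000;                      // stand-in for sCacheMaxEntries
	int64_t max_size             = int64_t(1024) * 1024 * 1024; // 1 GB budget

	// 20% of the budget is reserved for header entries, capped by the entry limit.
	int64_t header_size = (max_size * 2) / 10;
	int64_t max_entries = header_size / kEntrySize;
	int64_t entries     = std::min(kMaxEntriesCap, max_entries);
	header_size         = entries * kEntrySize;
	max_size           -= header_size;

	// The remainder goes to texture bodies, up to a pre-set cap; what is left is unused.
	const int64_t kTexturesCap = int64_t(700) * 1024 * 1024;    // stand-in for sCacheMaxTexturesSize
	int64_t textures_size      = std::min(kTexturesCap, max_size);
	max_size                  -= textures_size;

	std::cout << "Headers: " << entries
	          << "  Textures: " << textures_size / (1024 * 1024) << " MB"
	          << "  Unused: " << max_size / (1024 * 1024) << " MB\n";
	return 0;
}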
Example #2
// Writes imagesize to the header, updates timestamp
S32 LLTextureCache::setHeaderCacheEntry(const LLUUID& id, S32 imagesize)
{
	mHeaderMutex.lock();
	llassert_always(imagesize >= 0);
	Entry entry;
	S32 idx = openAndReadEntry(id, entry, true);
	if (idx >= 0)
	{
		entry.mImageSize = imagesize;
		writeEntryAndClose(idx, entry);
		mHeaderMutex.unlock();
	}
	else // retry
	{
		mHeaderMutex.unlock();
		readHeaderCache(); // We couldn't write an entry, so refresh the LRU
		mHeaderMutex.lock();
		llassert_always(!mLRU.empty() || mHeaderEntriesInfo.mEntries < sCacheMaxEntries);
		mHeaderMutex.unlock();
		idx = setHeaderCacheEntry(id, imagesize); // assert above ensures no inf. recursion
	}
	return idx;
}
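
The unlock-before-retry shape of setHeaderCacheEntry() can be shown on its own. The sketch below uses std::mutex and stand-in helpers (none of these names come from the viewer); the point is that a non-recursive mutex has to be released before the recursive call, otherwise the inner lock() would deadlock on the thread that already holds it.

// Standalone sketch of the unlock-then-retry pattern; stand-in names, not viewer code.
#include <iostream>
#include <mutex>

std::mutex g_header_mutex;      // stand-in for mHeaderMutex

// Pretends the entry table is full on the first attempt only.
bool try_write_entry()
{
	static bool full = true;
	if (full)
	{
		full = false;
		return false;
	}
	return true;
}

void rebuild_lru() {}           // stand-in for readHeaderCache(), which frees a slot

int set_entry()
{
	g_header_mutex.lock();
	if (try_write_entry())
	{
		g_header_mutex.unlock();
		return 0;               // success
	}
	g_header_mutex.unlock();    // must release before retrying: the mutex is not recursive
	rebuild_lru();              // makes room, so the retry is expected to succeed
	return set_entry();         // recurses once; the real code asserts this cannot loop forever
}

int main()
{
	std::cout << "result: " << set_entry() << "\n";
	return 0;
}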
Example #3
// Called from work thread
S32 LLTextureCache::getHeaderCacheEntry(const LLUUID& id, bool touch, S32* imagesize)
{
	bool retry = false;
	S32 idx = -1;

	{
		LLMutexLock lock(&mHeaderMutex);
		id_map_t::iterator iter = mHeaderIDMap.find(id);
		if (iter != mHeaderIDMap.end())
		{
			idx = iter->second;
		}
		else if (touch && !mReadOnly)
		{
			if (mHeaderEntriesInfo.mEntries < sCacheMaxEntries)
			{
				// Add an entry
				idx = mHeaderEntriesInfo.mEntries++;
				mHeaderIDMap[id] = idx;
				// Update Info
				ll_apr_file_write_ex(mHeaderEntriesFileName, getFileAPRPool(),
									(U8*)&mHeaderEntriesInfo, 0, sizeof(EntriesInfo));
			}
			else if (!mLRU.empty())
			{
				idx = mLRU.begin()->first; // will be erased below
				const LLUUID& oldid = mLRU.begin()->second;
				mHeaderIDMap.erase(oldid);
				mTexturesSizeMap.erase(oldid);
				mHeaderIDMap[id] = idx;
			}
			else
			{
				idx = -1;
				retry = true;
			}
		}
		if (idx >= 0)
		{
			if (touch && !mReadOnly)
			{
				// Update the lru entry
				mLRU.erase(idx);
				llassert_always(imagesize && *imagesize > 0);
				Entry* entry = new Entry(id, *imagesize, time(NULL));
				S32 offset = sizeof(EntriesInfo) + idx * sizeof(Entry);
				ll_apr_file_write_ex(mHeaderEntriesFileName, getFileAPRPool(),
									 (U8*)entry, offset, sizeof(Entry));
				delete entry;
			}
			else if (imagesize)
			{
				// Get the image size
				Entry entry;
				S32 offset = sizeof(EntriesInfo) + idx * sizeof(Entry);
				ll_apr_file_read_ex(mHeaderEntriesFileName, getFileAPRPool(),
									(U8*)&entry, offset, sizeof(Entry));
				*imagesize = entry.mSize;
			}
		}
	}
	if (retry)
	{
		readHeaderCache(getFileAPRPool()); // updates the lru
		llassert_always(!mLRU.empty() || mHeaderEntriesInfo.mEntries < sCacheMaxEntries);
		idx = getHeaderCacheEntry(id, touch, imagesize); // assert above ensures no inf. recursion
	}
	return idx;
}
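
The offset computation sizeof(EntriesInfo) + idx * sizeof(Entry) implies an on-disk layout of one fixed-size header followed by fixed-size records. The sketch below models that layout with illustrative stand-in structs; the real Entry and EntriesInfo fields in the viewer differ.

// Standalone sketch of the entries-file layout implied above; the struct fields
// are illustrative stand-ins, not the viewer's real definitions.
#include <cstddef>
#include <cstdint>
#include <iostream>

struct EntriesInfo              // fixed-size file header
{
	uint32_t mVersion;
	uint32_t mEntries;
};

struct Entry                    // fixed-size record, one per cached texture
{
	uint8_t  mID[16];           // UUID bytes
	int32_t  mImageSize;
	int32_t  mBodySize;
	uint32_t mTime;
};

// Byte offset of record idx inside the entries file.
std::size_t entry_offset(std::size_t idx)
{
	return sizeof(EntriesInfo) + idx * sizeof(Entry);
}

int main()
{
	std::cout << "entry 0 at offset " << entry_offset(0)
	          << ", entry 10 at offset " << entry_offset(10) << "\n";
	return 0;
}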
Example #4
// Called from either the main thread or the worker thread
void LLTextureCache::readHeaderCache()
{
	mHeaderMutex.lock();

	mLRU.clear(); // always clear the LRU

	readEntriesHeader();
	
	if (mHeaderEntriesInfo.mVersion != sHeaderCacheVersion)
	{
		if (!mReadOnly)
		{
			purgeAllTextures(false);
		}
	}
	else
	{
		std::vector<Entry> entries;
		U32 num_entries = openAndReadEntries(entries);
		if (num_entries)
		{
			U32 empty_entries = 0;
			typedef std::pair<U32, LLUUID> lru_data_t;
			std::set<lru_data_t> lru;
			std::set<LLUUID> purge_list;
			for (U32 i=0; i<num_entries; i++)
			{
				Entry& entry = entries[i];
				const LLUUID& id = entry.mID;
				if (entry.mImageSize <= 0)
				{
					// This entry is on the free list; don't put it in the LRU
					++empty_entries;
				}
				else
				{
					lru.insert(std::make_pair(entry.mTime, id));
					if (entry.mBodySize > 0)
					{
						if (entry.mBodySize > entry.mImageSize)
						{
							// Shouldn't happen, failsafe only
							llwarns << "Bad entry: " << i << ": " << entry.mID << ": BodySize: " << entry.mBodySize << llendl;
							purge_list.insert(id);
						}
					}
				}
			}
			if (num_entries - empty_entries > sCacheMaxEntries)
			{
				// Special case: cache size was reduced, need to remove entries
				// Note: After we prune entries, we will call this again and create the LRU
				U32 entries_to_purge = (num_entries - empty_entries) - sCacheMaxEntries;
				llinfos << "Texture Cache Entries: " << num_entries << " Max: " << sCacheMaxEntries << " Empty: " << empty_entries << " Purging: " << entries_to_purge << llendl;
				// The loop below terminates before iter reaches lru.end(): if it ever did, we would already have
				// purge_list.size() = lru.size() = num_entries - empty_entries = entries_to_purge + sCacheMaxEntries >= entries_to_purge,
				// contradicting the loop condition.
				for (std::set<lru_data_t>::iterator iter = lru.begin(); purge_list.size() < entries_to_purge; ++iter)
				{
					purge_list.insert(iter->second);
				}
				llassert_always(purge_list.size() >= entries_to_purge);
			}
			else
			{
				S32 lru_entries = (S32)((F32)sCacheMaxEntries * TEXTURE_CACHE_LRU_SIZE);
				for (std::set<lru_data_t>::iterator iter = lru.begin(); iter != lru.end(); ++iter)
				{
					mLRU.insert(iter->second);
// 					llinfos << "LRU: " << iter->first << " : " << iter->second << llendl;
					if (--lru_entries <= 0)
						break;
				}
			}
			
			if (purge_list.size() > 0)
			{
				for (std::set<LLUUID>::iterator iter = purge_list.begin(); iter != purge_list.end(); ++iter)
				{
					removeFromCacheLocked(*iter);
				}
				// If we removed any entries, we need to rebuild the entries list,
				// write the header, and call this again
				std::vector<Entry> new_entries;
				for (U32 i=0; i<num_entries; i++)
				{
					const Entry& entry = entries[i];
					if (entry.mImageSize > 0)
					{
						new_entries.push_back(entry);
					}
				}
				llassert_always(new_entries.size() <= sCacheMaxEntries);
				mHeaderEntriesInfo.mEntries = new_entries.size();
				writeEntriesHeader();
				writeEntriesAndClose(new_entries);
				mHeaderMutex.unlock(); // unlock the mutex before calling again
				readHeaderCache(); // repeat with new entries file
				return;
			}
			else
			{
				// Entries did not change; nothing to do here.
			}
		}
	}
	mHeaderMutex.unlock();
}
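
The LRU in readHeaderCache() is rebuilt from a std::set of (timestamp, id) pairs, so iteration starts at the least recently used entry. The sketch below shows that ordering and the purge-from-the-front loop with stand-in types (std::string instead of LLUUID, fixed timestamps); it is an illustration of the technique, not viewer code.

// Standalone sketch of the (timestamp, id) ordering used to build the LRU;
// std::string stands in for LLUUID purely for illustration.
#include <cstdint>
#include <iostream>
#include <set>
#include <string>
#include <utility>

int main()
{
	typedef std::pair<uint32_t, std::string> lru_data_t; // (last-use time, id)
	std::set<lru_data_t> lru;

	lru.insert(std::make_pair(1700000300u, std::string("texture-c")));
	lru.insert(std::make_pair(1700000100u, std::string("texture-a"))); // oldest
	lru.insert(std::make_pair(1700000200u, std::string("texture-b")));

	// std::set orders the pairs by timestamp first, so the front of the set is
	// the least recently used entry; purging from the front evicts oldest first.
	const std::size_t entries_to_purge = 2;
	std::set<std::string> purge_list;
	for (std::set<lru_data_t>::iterator iter = lru.begin();
	     purge_list.size() < entries_to_purge; ++iter)
	{
		purge_list.insert(iter->second);
	}

	for (std::set<std::string>::iterator it = purge_list.begin(); it != purge_list.end(); ++it)
	{
		std::cout << "purge " << *it << "\n";
	}
	return 0;
}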