TextureCache::TCacheEntryBase* TextureCache::Load(unsigned int const stage,
  u32 const address, unsigned int width, unsigned int height, int const texformat,
  unsigned int const tlutaddr, int const tlutfmt, bool const use_mipmaps,
  unsigned int maxlevel, bool const from_tmem)
{
  if (0 == address)
    return nullptr;

  // TexelSizeInNibbles(format) * width * height / 16;
  const unsigned int bsw = TexDecoder_GetBlockWidthInTexels(texformat) - 1;
  const unsigned int bsh = TexDecoder_GetBlockHeightInTexels(texformat) - 1;

  unsigned int expandedWidth = (width + bsw) & (~bsw);
  unsigned int expandedHeight = (height + bsh) & (~bsh);
  const unsigned int nativeW = width;
  const unsigned int nativeH = height;

  u32 texID = address;
  // Hash assigned to texcache entry (also used to generate filenames used for texture dumping and custom texture lookup)
  u64 tex_hash = TEXHASH_INVALID;
  u64 tlut_hash = TEXHASH_INVALID;

  u32 full_format = texformat;
  PC_TexFormat pcfmt = PC_TEX_FMT_NONE;

  const bool isPaletteTexture = (texformat == GX_TF_C4 || texformat == GX_TF_C8 || texformat == GX_TF_C14X2);
  if (isPaletteTexture)
    full_format = texformat | (tlutfmt << 16);

  const u32 texture_size = TexDecoder_GetTextureSizeInBytes(expandedWidth, expandedHeight, texformat);

  const u8* src_data;
  if (from_tmem)
    src_data = &texMem[bpmem.tex[stage / 4].texImage1[stage % 4].tmem_even * TMEM_LINE_SIZE];
  else
    src_data = Memory::GetPointer(address);

  // TODO: This doesn't hash GB tiles for preloaded RGBA8 textures (instead, it's hashing more data from the low tmem bank than it should)
  tex_hash = GetHash64(src_data, texture_size, g_ActiveConfig.iSafeTextureCache_ColorSamples);
  if (isPaletteTexture)
  {
    const u32 palette_size = TexDecoder_GetPaletteSize(texformat);
    tlut_hash = GetHash64(&texMem[tlutaddr], palette_size, g_ActiveConfig.iSafeTextureCache_ColorSamples);

    // NOTE: For non-paletted textures, texID is equal to the texture address.
    //       A paletted texture, however, may have multiple texIDs assigned to it depending on the currently used tlut.
    //       This (changing texID depending on the tlut_hash) is a trick to get around
    //       an issue with Metroid Prime's fonts (it has multiple sets of fonts layered on top of each other,
    //       stored in a single texture, and uses the palette to make different characters
    //       visible or invisible). Thus, unless we want to recreate the textures for every drawn character,
    //       we must make sure that a paletted texture gets assigned multiple IDs, one for each tlut used.
    //
    // TODO: Because texID isn't always the same as the address now, CopyRenderTargetToTexture might be broken now
    texID ^= ((u32)tlut_hash) ^ (u32)(tlut_hash >> 32);
    tex_hash ^= tlut_hash;
  }

  // D3D doesn't like when the specified mipmap count would require more than one 1x1-sized LOD in the mipmap chain
  // e.g. 64x64 with 7 LODs would have the mipmap chain 64x64,32x32,16x16,8x8,4x4,2x2,1x1,1x1, so we limit the mipmap count to 6 there
  while (g_ActiveConfig.backend_info.bUseMinimalMipCount && std::max(expandedWidth, expandedHeight) >> maxlevel == 0)
    --maxlevel;

  TCacheEntryBase *entry = textures[texID];
  if (entry)
  {
    // 1. Calculate reference hash:
    // calculated from RAM texture data for normal textures. Hashes for paletted textures are modified by tlut_hash. 0 for virtual EFB copies.
    if (g_ActiveConfig.bCopyEFBToTexture && entry->IsEfbCopy())
      tex_hash = TEXHASH_INVALID;

    // 2. a) For EFB copies, only the hash and the texture address need to match
    if (entry->IsEfbCopy() && tex_hash == entry->hash && address == entry->addr)
    {
      entry->type = TCET_EC_VRAM;

      // TODO: Print a warning if the format changes! In this case,
      // we could reinterpret the internal texture object data to the new pixel format
      // (similar to what is already being done in Renderer::ReinterpretPixelFormat())
      return ReturnEntry(stage, entry);
    }

    // 2. b) For normal textures, all texture parameters need to match
    if (address == entry->addr && tex_hash == entry->hash && full_format == entry->format &&
        entry->num_mipmaps > maxlevel && entry->native_width == nativeW && entry->native_height == nativeH)
    {
      return ReturnEntry(stage, entry);
    }

    // 3. If we reach this line, we'll have to upload the new texture data to VRAM.
    //    If we're lucky, the texture parameters didn't change and we can reuse the internal
    //    texture object instead of destroying and recreating it.
    //
    // TODO: Don't we need to force texture decoding to RGBA8 for dynamic EFB copies?
    // TODO: Actually, it should be enough if the internal texture format matches...
    if ((entry->type == TCET_NORMAL && width == entry->virtual_width && height == entry->virtual_height &&
         full_format == entry->format && entry->num_mipmaps > maxlevel) ||
        (entry->type == TCET_EC_DYNAMIC && entry->native_width == width && entry->native_height == height))
    {
      // reuse the texture
    }
    else
    {
      // delete the texture and make a new one
      delete entry;
      entry = nullptr;
    }
  }

  bool using_custom_texture = false;

  if (g_ActiveConfig.bHiresTextures)
  {
    // This function may modify width/height.
    pcfmt = LoadCustomTexture(tex_hash, texformat, 0, width, height);
    if (pcfmt != PC_TEX_FMT_NONE)
    {
      if (expandedWidth != width || expandedHeight != height)
      {
        expandedWidth = width;
        expandedHeight = height;

        // If we thought we could reuse the texture before, make sure to pool it now!
        if (entry)
        {
          delete entry;
          entry = nullptr;
        }
      }
      using_custom_texture = true;
    }
  }

  if (!using_custom_texture)
  {
    if (!(texformat == GX_TF_RGBA8 && from_tmem))
    {
      pcfmt = TexDecoder_Decode(temp, src_data, expandedWidth, expandedHeight, texformat, tlutaddr, tlutfmt, g_ActiveConfig.backend_info.bUseRGBATextures);
    }
    else
    {
      u8* src_data_gb = &texMem[bpmem.tex[stage / 4].texImage2[stage % 4].tmem_odd * TMEM_LINE_SIZE];
      pcfmt = TexDecoder_DecodeRGBA8FromTmem(temp, src_data, src_data_gb, expandedWidth, expandedHeight);
    }
  }

  u32 texLevels = use_mipmaps ? (maxlevel + 1) : 1;
  const bool using_custom_lods = using_custom_texture && CheckForCustomTextureLODs(tex_hash, texformat, texLevels);
  // Only load native mips if their dimensions fit to our virtual texture dimensions
  const bool use_native_mips = use_mipmaps && !using_custom_lods && (width == nativeW && height == nativeH);
  texLevels = (use_native_mips || using_custom_lods) ? texLevels : 1; // TODO: Should be forced to 1 for non-pow2 textures (e.g. efb copies with automatically adjusted IR)

  // create the entry/texture
  if (nullptr == entry)
  {
    textures[texID] = entry = g_texture_cache->CreateTexture(width, height, expandedWidth, texLevels, pcfmt);

    // Sometimes, we can get around recreating a texture if only the number of mip levels changes
    // e.g. if our texture cache entry got too many mipmap levels we can limit the number of used levels by setting the appropriate render states
    // Thus, we don't update this member for every Load, but just whenever the texture gets recreated
    //
    // TODO: This is the wrong value. We should be storing the number of levels our actual texture has.
    // But that will currently make the above "existing entry" tests fail, as "texLevels" is not calculated until later.
    // Currently, we might try to reuse a texture which appears to have more levels than it actually has.
    entry->num_mipmaps = maxlevel + 1;
    entry->type = TCET_NORMAL;

    GFX_DEBUGGER_PAUSE_AT(NEXT_NEW_TEXTURE, true);
  }
  else
  {
    // load texture (CreateTexture also loads level 0)
    entry->Load(width, height, expandedWidth, 0);
  }

  entry->SetGeneralParameters(address, texture_size, full_format, entry->num_mipmaps);
  entry->SetDimensions(nativeW, nativeH, width, height);
  entry->hash = tex_hash;

  if (entry->IsEfbCopy() && !g_ActiveConfig.bCopyEFBToTexture)
    entry->type = TCET_EC_DYNAMIC;
  else
    entry->type = TCET_NORMAL;

  if (g_ActiveConfig.bDumpTextures && !using_custom_texture)
    DumpTexture(entry, 0);

  u32 level = 1;
  // load mips - TODO: Loading mipmaps from tmem is untested!
  if (pcfmt != PC_TEX_FMT_NONE)
  {
    if (use_native_mips)
    {
      src_data += texture_size;

      const u8* ptr_even = nullptr;
      const u8* ptr_odd = nullptr;
      if (from_tmem)
      {
        ptr_even = &texMem[bpmem.tex[stage / 4].texImage1[stage % 4].tmem_even * TMEM_LINE_SIZE + texture_size];
        ptr_odd = &texMem[bpmem.tex[stage / 4].texImage2[stage % 4].tmem_odd * TMEM_LINE_SIZE];
      }

      for (; level != texLevels; ++level)
      {
        const u32 mip_width = CalculateLevelSize(width, level);
        const u32 mip_height = CalculateLevelSize(height, level);
        const u32 expanded_mip_width = (mip_width + bsw) & (~bsw);
        const u32 expanded_mip_height = (mip_height + bsh) & (~bsh);

        const u8*& mip_src_data = from_tmem ? ((level % 2) ? ptr_odd : ptr_even) : src_data;

        TexDecoder_Decode(temp, mip_src_data, expanded_mip_width, expanded_mip_height, texformat, tlutaddr, tlutfmt, g_ActiveConfig.backend_info.bUseRGBATextures);
        mip_src_data += TexDecoder_GetTextureSizeInBytes(expanded_mip_width, expanded_mip_height, texformat);

        entry->Load(mip_width, mip_height, expanded_mip_width, level);

        if (g_ActiveConfig.bDumpTextures)
          DumpTexture(entry, level);
      }
    }
    else if (using_custom_lods)
    {
      for (; level != texLevels; ++level)
      {
        unsigned int mip_width = CalculateLevelSize(width, level);
        unsigned int mip_height = CalculateLevelSize(height, level);

        LoadCustomTexture(tex_hash, texformat, level, mip_width, mip_height);
        entry->Load(mip_width, mip_height, mip_width, level);
      }
    }
  }

  INCSTAT(stats.numTexturesCreated);
  SETSTAT(stats.numTexturesAlive, textures.size());

  return ReturnEntry(stage, entry);
}
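// Editor's sketch (not part of the original file): the mask arithmetic used above for
// expandedWidth/expandedHeight, with bsw/bsh holding "block size - 1", rounds each dimension up
// to the next multiple of the texture format's block size. The later revision of Load() below
// expresses the same thing with the ROUND_UP macro. RoundUpToBlock is a hypothetical helper,
// shown only to illustrate the equivalence; it assumes power-of-two block sizes, which is what
// the GX texture formats use.
namespace
{
constexpr unsigned int RoundUpToBlock(unsigned int dim, unsigned int block_size)
{
  // Equivalent to (dim + bsw) & ~bsw with bsw = block_size - 1.
  return (dim + (block_size - 1)) & ~(block_size - 1);
}
static_assert(RoundUpToBlock(17, 8) == 24, "17 texels occupy three 8-texel blocks");
static_assert(RoundUpToBlock(32, 8) == 32, "already-aligned dimensions are unchanged");
}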
TextureCache::TCacheEntryBase* TextureCache::Load(const u32 stage)
{
  const FourTexUnits &tex = bpmem.tex[stage >> 2];
  const u32 id = stage & 3;
  const u32 address = (tex.texImage3[id].image_base/* & 0x1FFFFF*/) << 5;
  u32 width = tex.texImage0[id].width + 1;
  u32 height = tex.texImage0[id].height + 1;
  const int texformat = tex.texImage0[id].format;
  const u32 tlutaddr = tex.texTlut[id].tmem_offset << 9;
  const u32 tlutfmt = tex.texTlut[id].tlut_format;
  const bool use_mipmaps = (tex.texMode0[id].min_filter & 3) != 0;
  u32 tex_levels = use_mipmaps ? ((tex.texMode1[id].max_lod + 0xf) / 0x10 + 1) : 1;
  const bool from_tmem = tex.texImage1[id].image_type != 0;

  if (0 == address)
    return nullptr;

  // TexelSizeInNibbles(format) * width * height / 16;
  const unsigned int bsw = TexDecoder_GetBlockWidthInTexels(texformat);
  const unsigned int bsh = TexDecoder_GetBlockHeightInTexels(texformat);

  unsigned int expandedWidth = ROUND_UP(width, bsw);
  unsigned int expandedHeight = ROUND_UP(height, bsh);
  const unsigned int nativeW = width;
  const unsigned int nativeH = height;

  // Hash assigned to texcache entry (also used to generate filenames used for texture dumping and custom texture lookup)
  u64 base_hash = TEXHASH_INVALID;
  u64 full_hash = TEXHASH_INVALID;

  u32 full_format = texformat;

  const bool isPaletteTexture = (texformat == GX_TF_C4 || texformat == GX_TF_C8 || texformat == GX_TF_C14X2);

  // Reject invalid tlut format.
  if (isPaletteTexture && tlutfmt > GX_TL_RGB5A3)
    return nullptr;

  if (isPaletteTexture)
    full_format = texformat | (tlutfmt << 16);

  const u32 texture_size = TexDecoder_GetTextureSizeInBytes(expandedWidth, expandedHeight, texformat);
  u32 additional_mips_size = 0; // not including level 0, which is texture_size

  // GPUs don't like when the specified mipmap count would require more than one 1x1-sized LOD in the mipmap chain
  // e.g. 64x64 with 7 LODs would have the mipmap chain 64x64,32x32,16x16,8x8,4x4,2x2,1x1,0x0, so we limit the mipmap count to 6 there
  tex_levels = std::min<u32>(IntLog2(std::max(width, height)) + 1, tex_levels);

  for (u32 level = 1; level != tex_levels; ++level)
  {
    // We still need to calculate the original size of the mips
    const u32 expanded_mip_width = ROUND_UP(CalculateLevelSize(width, level), bsw);
    const u32 expanded_mip_height = ROUND_UP(CalculateLevelSize(height, level), bsh);

    additional_mips_size += TexDecoder_GetTextureSizeInBytes(expanded_mip_width, expanded_mip_height, texformat);
  }

  // If we are recording a FifoLog, keep track of what memory we read.
  // FifoRecorder does its own memory modification tracking, independent of the texture hashing below.
  if (g_bRecordFifoData && !from_tmem)
    FifoRecorder::GetInstance().UseMemory(address, texture_size + additional_mips_size, MemoryUpdate::TEXTURE_MAP);

  const u8* src_data;
  if (from_tmem)
    src_data = &texMem[bpmem.tex[stage / 4].texImage1[stage % 4].tmem_even * TMEM_LINE_SIZE];
  else
    src_data = Memory::GetPointer(address);

  // TODO: This doesn't hash GB tiles for preloaded RGBA8 textures (instead, it's hashing more data from the low tmem bank than it should)
  base_hash = GetHash64(src_data, texture_size, g_ActiveConfig.iSafeTextureCache_ColorSamples);
  u32 palette_size = 0;
  if (isPaletteTexture)
  {
    palette_size = TexDecoder_GetPaletteSize(texformat);
    full_hash = base_hash ^ GetHash64(&texMem[tlutaddr], palette_size, g_ActiveConfig.iSafeTextureCache_ColorSamples);
  }
  else
  {
    full_hash = base_hash;
  }

  // Search the texture cache for textures by address
  //
  // Find all texture cache entries for the current texture address, and decide whether to use one of
  // them, or to create a new one
  //
  // In most cases, the fastest way is to use only one texture cache entry for the same address. Usually,
  // when a texture changes, the old version of the texture is unlikely to be used again. If there were
  // new cache entries created for normal texture updates, there would be a slowdown due to a huge amount
  // of unused cache entries. Also thanks to texture pooling, overwriting an existing cache entry is
  // faster than creating a new one from scratch.
  //
  // Some games use the same address for different textures though. If the same cache entry was used in
  // this case, it would be constantly overwritten, and effectively there wouldn't be any caching for
  // those textures. Examples for this are Metroid Prime and Castlevania 3. Metroid Prime has multiple
  // sets of fonts on top of each other stored in a single texture and uses the palette to make different
  // characters visible or invisible. In Castlevania 3 some textures are used for 2 different things or
  // at least in 2 different ways (size 1024x1024 vs 1024x256).
  //
  // To determine whether to use multiple cache entries or a single entry, use the following heuristic:
  // If the same texture address is used several times during the same frame, assume the address is used
  // for different purposes and allow creating an additional cache entry. If there's at least one entry
  // that hasn't been used for the same frame, then overwrite it, in order to keep the cache as small as
  // possible. If the current texture is found in the cache, use that entry.
  //
  // For efb copies, the entry created in CopyRenderTargetToTexture always has to be used, or else it was
  // done in vain.
  std::pair<TexCache::iterator, TexCache::iterator> iter_range = textures_by_address.equal_range((u64)address);
  TexCache::iterator iter = iter_range.first;
  TexCache::iterator oldest_entry = iter;
  int temp_frameCount = 0x7fffffff;
  TexCache::iterator unconverted_copy = textures_by_address.end();

  while (iter != iter_range.second)
  {
    TCacheEntryBase* entry = iter->second;

    // Do not load strided EFB copies, they are not meant to be used directly
    if (entry->IsEfbCopy() && entry->native_width == nativeW && entry->native_height == nativeH &&
        entry->memory_stride == entry->CacheLinesPerRow() * 32)
    {
      // EFB copies have slightly different rules as EFB copy formats have different
      // meanings from texture formats.
      if ((base_hash == entry->hash &&
           (!isPaletteTexture || g_Config.backend_info.bSupportsPaletteConversion)) ||
          IsPlayingBackFifologWithBrokenEFBCopies)
      {
        // TODO: We should check format/width/height/levels for EFB copies. Checking
        // format is complicated because EFB copy formats don't exactly match
        // texture formats. I'm not sure what effect checking width/height/levels
        // would have.
        if (!isPaletteTexture || !g_Config.backend_info.bSupportsPaletteConversion)
          return ReturnEntry(stage, entry);

        // Note that we found an unconverted EFB copy, then continue. We'll
        // perform the conversion later. Currently, we only convert EFB copies to
        // palette textures; we could do other conversions if it proved to be
        // beneficial.
        unconverted_copy = iter;
      }
      else
      {
        // Aggressively prune EFB copies: if it isn't useful here, it will probably
        // never be useful again. It's theoretically possible for a game to do
        // something weird where the copy could become useful in the future, but in
        // practice it doesn't happen.
        iter = FreeTexture(iter);
        continue;
      }
    }
    else
    {
      // For normal textures, all texture parameters need to match
      if (entry->hash == full_hash && entry->format == full_format && entry->native_levels >= tex_levels &&
          entry->native_width == nativeW && entry->native_height == nativeH)
      {
        entry = DoPartialTextureUpdates(iter);

        return ReturnEntry(stage, entry);
      }
    }

    // Find the texture which hasn't been used for the longest time. Count paletted
    // textures as the same texture here, when the texture itself is the same. This
    // improves the performance a lot in some games that use paletted textures.
    // Example: Sonic the Fighters (inside Sonic Gems Collection)
    // Skip EFB copies here, so they can be used for partial texture updates
    if (entry->frameCount != FRAMECOUNT_INVALID && entry->frameCount < temp_frameCount &&
        !entry->IsEfbCopy() && !(isPaletteTexture && entry->base_hash == base_hash))
    {
      temp_frameCount = entry->frameCount;
      oldest_entry = iter;
    }
    ++iter;
  }

  if (unconverted_copy != textures_by_address.end())
  {
    // Perform palette decoding.
    TCacheEntryBase *entry = unconverted_copy->second;

    TCacheEntryConfig config;
    config.rendertarget = true;
    config.width = entry->config.width;
    config.height = entry->config.height;
    config.layers = FramebufferManagerBase::GetEFBLayers();
    TCacheEntryBase *decoded_entry = AllocateTexture(config);

    decoded_entry->SetGeneralParameters(address, texture_size, full_format);
    decoded_entry->SetDimensions(entry->native_width, entry->native_height, 1);
    decoded_entry->SetHashes(base_hash, full_hash);
    decoded_entry->frameCount = FRAMECOUNT_INVALID;
    decoded_entry->is_efb_copy = false;

    g_texture_cache->ConvertTexture(decoded_entry, entry, &texMem[tlutaddr], (TlutFormat)tlutfmt);

    textures_by_address.emplace((u64)address, decoded_entry);

    return ReturnEntry(stage, decoded_entry);
  }

  // Search the texture cache for normal textures by hash
  //
  // If the texture was fully hashed, the address does not need to match. Identical
  // duplicate textures cause unnecessary slowdowns
  // Example: Tales of Symphonia (GC) uses over 500 small textures in menus, but only around 70 different ones
  if (g_ActiveConfig.iSafeTextureCache_ColorSamples == 0 ||
      std::max(texture_size, palette_size) <= (u32)g_ActiveConfig.iSafeTextureCache_ColorSamples * 8)
  {
    iter_range = textures_by_hash.equal_range(full_hash);
    iter = iter_range.first;
    while (iter != iter_range.second)
    {
      TCacheEntryBase* entry = iter->second;
      // All parameters, except the address, need to match here
      if (entry->format == full_format && entry->native_levels >= tex_levels &&
          entry->native_width == nativeW && entry->native_height == nativeH)
      {
        entry = DoPartialTextureUpdates(iter);

        return ReturnEntry(stage, entry);
      }
      ++iter;
    }
  }

  // If at least one entry was not used for the same frame, overwrite the oldest one
  if (temp_frameCount != 0x7fffffff)
  {
    // pool this texture and make a new one later
    FreeTexture(oldest_entry);
  }

  std::shared_ptr<HiresTexture> hires_tex;
  if (g_ActiveConfig.bHiresTextures)
  {
    hires_tex = HiresTexture::Search(
      src_data, texture_size,
      &texMem[tlutaddr], palette_size,
      width, height,
      texformat, use_mipmaps
    );

    if (hires_tex)
    {
      auto& l = hires_tex->m_levels[0];
      if (l.width != width || l.height != height)
      {
        width = l.width;
        height = l.height;
      }
      expandedWidth = l.width;
      expandedHeight = l.height;
      CheckTempSize(l.data_size);
      memcpy(temp, l.data, l.data_size);
    }
  }

  if (!hires_tex)
  {
    if (!(texformat == GX_TF_RGBA8 && from_tmem))
    {
      const u8* tlut = &texMem[tlutaddr];
      TexDecoder_Decode(temp, src_data, expandedWidth, expandedHeight, texformat, tlut, (TlutFormat)tlutfmt);
    }
    else
    {
      u8* src_data_gb = &texMem[bpmem.tex[stage / 4].texImage2[stage % 4].tmem_odd * TMEM_LINE_SIZE];
      TexDecoder_DecodeRGBA8FromTmem(temp, src_data, src_data_gb, expandedWidth, expandedHeight);
    }
  }

  // how many levels the allocated texture shall have
  const u32 texLevels = hires_tex ? (u32)hires_tex->m_levels.size() : tex_levels;

  // create the entry/texture
  TCacheEntryConfig config;
  config.width = width;
  config.height = height;
  config.levels = texLevels;

  TCacheEntryBase* entry = AllocateTexture(config);
  GFX_DEBUGGER_PAUSE_AT(NEXT_NEW_TEXTURE, true);

  iter = textures_by_address.emplace((u64)address, entry);
  if (g_ActiveConfig.iSafeTextureCache_ColorSamples == 0 ||
      std::max(texture_size, palette_size) <= (u32)g_ActiveConfig.iSafeTextureCache_ColorSamples * 8)
  {
    entry->textures_by_hash_iter = textures_by_hash.emplace(full_hash, entry);
  }

  entry->SetGeneralParameters(address, texture_size, full_format);
  entry->SetDimensions(nativeW, nativeH, tex_levels);
  entry->SetHashes(base_hash, full_hash);
  entry->is_efb_copy = false;
  entry->is_custom_tex = hires_tex != nullptr;

  // load texture
  entry->Load(width, height, expandedWidth, 0);

  std::string basename = "";
  if (g_ActiveConfig.bDumpTextures && !hires_tex)
  {
    basename = HiresTexture::GenBaseName(
      src_data, texture_size,
      &texMem[tlutaddr], palette_size,
      width, height,
      texformat, use_mipmaps,
      true
    );
    DumpTexture(entry, basename, 0);
  }

  if (hires_tex)
  {
    for (u32 level = 1; level != texLevels; ++level)
    {
      auto& l = hires_tex->m_levels[level];
      CheckTempSize(l.data_size);
      memcpy(temp, l.data, l.data_size);
      entry->Load(l.width, l.height, l.width, level);
    }
  }
  else
  {
    // load mips - TODO: Loading mipmaps from tmem is untested!
    src_data += texture_size;

    const u8* ptr_even = nullptr;
    const u8* ptr_odd = nullptr;
    if (from_tmem)
    {
      ptr_even = &texMem[bpmem.tex[stage / 4].texImage1[stage % 4].tmem_even * TMEM_LINE_SIZE + texture_size];
      ptr_odd = &texMem[bpmem.tex[stage / 4].texImage2[stage % 4].tmem_odd * TMEM_LINE_SIZE];
    }

    for (u32 level = 1; level != texLevels; ++level)
    {
      const u32 mip_width = CalculateLevelSize(width, level);
      const u32 mip_height = CalculateLevelSize(height, level);
      const u32 expanded_mip_width = ROUND_UP(mip_width, bsw);
      const u32 expanded_mip_height = ROUND_UP(mip_height, bsh);

      const u8*& mip_src_data = from_tmem ? ((level % 2) ? ptr_odd : ptr_even) : src_data;
      const u8* tlut = &texMem[tlutaddr];
      TexDecoder_Decode(temp, mip_src_data, expanded_mip_width, expanded_mip_height, texformat, tlut, (TlutFormat)tlutfmt);
      mip_src_data += TexDecoder_GetTextureSizeInBytes(expanded_mip_width, expanded_mip_height, texformat);

      entry->Load(mip_width, mip_height, expanded_mip_width, level);

      if (g_ActiveConfig.bDumpTextures)
        DumpTexture(entry, basename, level);
    }
  }

  INCSTAT(stats.numTexturesUploaded);
  SETSTAT(stats.numTexturesAlive, textures_by_address.size());

  entry = DoPartialTextureUpdates(iter);

  return ReturnEntry(stage, entry);
}
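// Editor's sketch (not part of the original file): the tex_levels clamp above relies on
// CalculateLevelSize() and IntLog2(). The helpers below are stand-ins written for this
// illustration (assumed behavior, not the project's actual implementations): each mip level
// halves a dimension but never drops below one texel, and a WxH texture supports at most
// IntLog2(max(W, H)) + 1 levels before every further level would repeat 1x1.
namespace
{
constexpr u32 SketchLevelSize(u32 level_0_size, u32 level)
{
  return (level_0_size >> level) > 0 ? (level_0_size >> level) : 1;
}
constexpr u32 SketchIntLog2(u32 value)
{
  return value <= 1 ? 0 : 1 + SketchIntLog2(value >> 1);
}
// A 64x64 texture has a 7-level chain (64,32,16,8,4,2,1); requesting more levels is clamped.
static_assert(SketchIntLog2(64) + 1 == 7, "full mip chain of a 64x64 texture");
static_assert(SketchLevelSize(64, 6) == 1, "level 6 of a 64-texel dimension is 1 texel");
}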
TextureCacheBase::TCacheEntryBase* TextureCacheBase::DoPartialTextureUpdates(TexCache::iterator iter_t, u8* palette, u32 tlutfmt)
{
  TCacheEntryBase* entry_to_update = iter_t->second;
  const bool isPaletteTexture = (entry_to_update->format == GX_TF_C4 ||
                                 entry_to_update->format == GX_TF_C8 ||
                                 entry_to_update->format == GX_TF_C14X2 ||
                                 entry_to_update->format >= 0x10000);

  // EFB copies are excluded from these updates, until there's an example where a game would
  // benefit from updating. This would require more work to be done.
  if (entry_to_update->IsEfbCopy())
    return entry_to_update;

  u32 block_width = TexDecoder_GetBlockWidthInTexels(entry_to_update->format & 0xf);
  u32 block_height = TexDecoder_GetBlockHeightInTexels(entry_to_update->format & 0xf);
  u32 block_size = block_width * block_height * TexDecoder_GetTexelSizeInNibbles(entry_to_update->format & 0xf) / 2;

  u32 numBlocksX = (entry_to_update->native_width + block_width - 1) / block_width;

  TexCache::iterator iter = textures_by_address.lower_bound(entry_to_update->addr > MAX_TEXTURE_BINARY_SIZE ?
                                                            entry_to_update->addr - MAX_TEXTURE_BINARY_SIZE : 0);
  TexCache::iterator iterend = textures_by_address.upper_bound(entry_to_update->addr + entry_to_update->size_in_bytes);
  while (iter != iterend)
  {
    TCacheEntryBase* entry = iter->second;
    if (entry != entry_to_update &&
        entry->IsEfbCopy() &&
        entry->references.count(entry_to_update) == 0 &&
        entry->OverlapsMemoryRange(entry_to_update->addr, entry_to_update->size_in_bytes) &&
        entry->memory_stride == numBlocksX * block_size)
    {
      if (entry->hash == entry->CalculateHash())
      {
        if (isPaletteTexture)
        {
          TCacheEntryBase *decoded_entry = entry->ApplyPalette(palette, tlutfmt);
          if (decoded_entry)
          {
            // Link the efb copy with the partially updated texture, so we won't apply this partial update again
            entry->CreateReference(entry_to_update);
            // Mark the texture update as used, as if it was loaded directly
            entry->frameCount = FRAMECOUNT_INVALID;
            entry = decoded_entry;
          }
          else
          {
            ++iter;
            continue;
          }
        }

        u32 src_x, src_y, dst_x, dst_y;

        // Note for understanding the math:
        // Normal textures can't be strided, so the 2 missing cases with src_x > 0 don't exist
        if (entry->addr >= entry_to_update->addr)
        {
          u32 block_offset = (entry->addr - entry_to_update->addr) / block_size;
          u32 block_x = block_offset % numBlocksX;
          u32 block_y = block_offset / numBlocksX;
          src_x = 0;
          src_y = 0;
          dst_x = block_x * block_width;
          dst_y = block_y * block_height;
        }
        else
        {
          u32 block_offset = (entry_to_update->addr - entry->addr) / block_size;
          u32 block_x = (~block_offset + 1) % numBlocksX;
          u32 block_y = (block_offset + block_x) / numBlocksX;
          src_x = 0;
          src_y = block_y * block_height;
          dst_x = block_x * block_width;
          dst_y = 0;
        }

        u32 copy_width = std::min(entry->native_width - src_x, entry_to_update->native_width - dst_x);
        u32 copy_height = std::min(entry->native_height - src_y, entry_to_update->native_height - dst_y);

        // If one of the textures is scaled, scale both with the current efb scaling factor
        if (entry_to_update->native_width != entry_to_update->config.width ||
            entry_to_update->native_height != entry_to_update->config.height ||
            entry->native_width != entry->config.width || entry->native_height != entry->config.height)
        {
          ScaleTextureCacheEntryTo(&entry_to_update,
                                   Renderer::EFBToScaledX(entry_to_update->native_width),
                                   Renderer::EFBToScaledY(entry_to_update->native_height));
          ScaleTextureCacheEntryTo(&entry,
                                   Renderer::EFBToScaledX(entry->native_width),
                                   Renderer::EFBToScaledY(entry->native_height));

          src_x = Renderer::EFBToScaledX(src_x);
          src_y = Renderer::EFBToScaledY(src_y);
          dst_x = Renderer::EFBToScaledX(dst_x);
          dst_y = Renderer::EFBToScaledY(dst_y);
          copy_width = Renderer::EFBToScaledX(copy_width);
          copy_height = Renderer::EFBToScaledY(copy_height);
        }

        MathUtil::Rectangle<int> srcrect, dstrect;
        srcrect.left = src_x;
        srcrect.top = src_y;
        srcrect.right = (src_x + copy_width);
        srcrect.bottom = (src_y + copy_height);
        dstrect.left = dst_x;
        dstrect.top = dst_y;
        dstrect.right = (dst_x + copy_width);
        dstrect.bottom = (dst_y + copy_height);
        entry_to_update->CopyRectangleFromTexture(entry, srcrect, dstrect);

        if (isPaletteTexture)
        {
          // Remove the temporary converted texture, it won't be used anywhere else
          // TODO: It would be nice to convert and copy in one step, but this code path isn't common
          InvalidateTexture(GetTexCacheIter(entry));
        }
        else
        {
          // Link the two textures together, so we won't apply this partial update again
          entry->CreateReference(entry_to_update);
          // Mark the texture update as used, as if it was loaded directly
          entry->frameCount = FRAMECOUNT_INVALID;
        }
      }
      else
      {
        // If the hash does not match, this EFB copy will not be used for anything, so remove it
        iter = InvalidateTexture(iter);
        continue;
      }
    }
    ++iter;
  }

  return entry_to_update;
}
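// Editor's sketch (not part of the original file): how the "entry->addr >= entry_to_update->addr"
// branch above converts an address delta into a destination texel position. The helpers and the
// figures are illustrative assumptions: a GX_TF_C8 texture uses 8x4 blocks of 32 bytes, so a
// 64-texel-wide destination has numBlocksX = 8, and an EFB copy written 5 blocks (160 bytes)
// past the texture base lands at texel (40, 0).
namespace
{
constexpr u32 SketchDstX(u32 addr_delta, u32 block_size, u32 num_blocks_x, u32 block_width)
{
  return ((addr_delta / block_size) % num_blocks_x) * block_width;
}
constexpr u32 SketchDstY(u32 addr_delta, u32 block_size, u32 num_blocks_x, u32 block_height)
{
  return ((addr_delta / block_size) / num_blocks_x) * block_height;
}
static_assert(SketchDstX(160, 32, 8, 8) == 40, "5 blocks in = block column 5 = texel x 40");
static_assert(SketchDstY(160, 32, 8, 4) == 0, "still within the first block row");
}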
TextureCache::TCacheEntryBase* TextureCache::DoPartialTextureUpdates(TexCache::iterator iter_t)
{
  TCacheEntryBase* entry_to_update = iter_t->second;
  const bool isPaletteTexture = (entry_to_update->format == GX_TF_C4 ||
                                 entry_to_update->format == GX_TF_C8 ||
                                 entry_to_update->format == GX_TF_C14X2 ||
                                 entry_to_update->format >= 0x10000);

  // Efb copies and paletted textures are excluded from these updates, until there's an example where a game would
  // benefit from this. Both would require more work to be done.
  // TODO: Implement upscaling support for normal textures, and then remove the efb to ram and the scaled efb restrictions
  if (entry_to_update->IsEfbCopy() || isPaletteTexture)
    return entry_to_update;

  u32 block_width = TexDecoder_GetBlockWidthInTexels(entry_to_update->format);
  u32 block_height = TexDecoder_GetBlockHeightInTexels(entry_to_update->format);
  u32 block_size = block_width * block_height * TexDecoder_GetTexelSizeInNibbles(entry_to_update->format) / 2;

  u32 numBlocksX = (entry_to_update->native_width + block_width - 1) / block_width;

  TexCache::iterator iter = textures_by_address.lower_bound(entry_to_update->addr);
  TexCache::iterator iterend = textures_by_address.upper_bound(entry_to_update->addr + entry_to_update->size_in_bytes);
  bool entry_need_scaling = true;
  while (iter != iterend)
  {
    TCacheEntryBase* entry = iter->second;
    if (entry != entry_to_update &&
        entry->IsEfbCopy() &&
        entry_to_update->addr <= entry->addr &&
        entry->addr + entry->size_in_bytes <= entry_to_update->addr + entry_to_update->size_in_bytes &&
        entry->frameCount == FRAMECOUNT_INVALID &&
        entry->memory_stride == numBlocksX * block_size)
    {
      u32 block_offset = (entry->addr - entry_to_update->addr) / block_size;
      u32 block_x = block_offset % numBlocksX;
      u32 block_y = block_offset / numBlocksX;

      u32 x = block_x * block_width;
      u32 y = block_y * block_height;
      MathUtil::Rectangle<int> srcrect, dstrect;
      srcrect.left = 0;
      srcrect.top = 0;
      dstrect.left = 0;
      dstrect.top = 0;
      if (entry_need_scaling)
      {
        entry_need_scaling = false;
        u32 w = entry_to_update->native_width * entry->config.width / entry->native_width;
        u32 h = entry_to_update->native_height * entry->config.height / entry->native_height;
        u32 max = g_renderer->GetMaxTextureSize();
        if (max < w || max < h)
        {
          iter++;
          continue;
        }
        if (entry_to_update->config.width != w || entry_to_update->config.height != h)
        {
          TextureCache::TCacheEntryConfig newconfig;
          newconfig.width = w;
          newconfig.height = h;
          newconfig.rendertarget = true;
          TCacheEntryBase* newentry = AllocateTexture(newconfig);
          newentry->SetGeneralParameters(entry_to_update->addr, entry_to_update->size_in_bytes, entry_to_update->format);
          newentry->SetDimensions(entry_to_update->native_width, entry_to_update->native_height, 1);
          newentry->SetHashes(entry_to_update->base_hash, entry_to_update->hash);
          newentry->frameCount = frameCount;
          newentry->is_efb_copy = false;
          srcrect.right = entry_to_update->config.width;
          srcrect.bottom = entry_to_update->config.height;
          dstrect.right = w;
          dstrect.bottom = h;
          newentry->CopyRectangleFromTexture(entry_to_update, srcrect, dstrect);
          entry_to_update = newentry;
          u64 key = iter_t->first;
          iter_t = FreeTexture(iter_t);
          textures_by_address.emplace(key, entry_to_update);
        }
      }
      srcrect.right = entry->config.width;
      srcrect.bottom = entry->config.height;
      dstrect.left = x * entry_to_update->config.width / entry_to_update->native_width;
      dstrect.top = y * entry_to_update->config.height / entry_to_update->native_height;
      dstrect.right = (x + entry->native_width) * entry_to_update->config.width / entry_to_update->native_width;
      dstrect.bottom = (y + entry->native_height) * entry_to_update->config.height / entry_to_update->native_height;
      entry_to_update->CopyRectangleFromTexture(entry, srcrect, dstrect);
      // Mark the texture update as used, so it isn't applied more than once
      entry->frameCount = frameCount;
    }
    ++iter;
  }
  return entry_to_update;
}
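// Editor's sketch (not part of the original file): the w/h computation above rescales the
// destination texture by the EFB copy's own internal-resolution factor (config size divided by
// native size), so the subsequent CopyRectangleFromTexture operates on textures at matching
// scale. The helper name and the numbers are illustrative assumptions only.
namespace
{
constexpr u32 SketchScaledExtent(u32 dst_native, u32 copy_config, u32 copy_native)
{
  return dst_native * copy_config / copy_native;
}
// With a 2x IR EFB copy (native 64, allocated 128), a destination that is 256 texels wide
// natively gets reallocated at 512 before the copy rectangle is applied.
static_assert(SketchScaledExtent(256, 128, 64) == 512, "destination adopts the copy's 2x scale");
}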
TextureCacheBase::TCacheEntryBase* TextureCacheBase::DoPartialTextureUpdates(TexCache::iterator iter_t)
{
  TCacheEntryBase* entry_to_update = iter_t->second;
  const bool isPaletteTexture = (entry_to_update->format == GX_TF_C4 ||
                                 entry_to_update->format == GX_TF_C8 ||
                                 entry_to_update->format == GX_TF_C14X2 ||
                                 entry_to_update->format >= 0x10000);

  // Efb copies and paletted textures are excluded from these updates, until there's an example where a game would
  // benefit from this. Both would require more work to be done.
  if (entry_to_update->IsEfbCopy() || isPaletteTexture)
    return entry_to_update;

  u32 block_width = TexDecoder_GetBlockWidthInTexels(entry_to_update->format & 0xf);
  u32 block_height = TexDecoder_GetBlockHeightInTexels(entry_to_update->format & 0xf);
  u32 block_size = block_width * block_height * TexDecoder_GetTexelSizeInNibbles(entry_to_update->format & 0xf) / 2;

  u32 numBlocksX = (entry_to_update->native_width + block_width - 1) / block_width;

  TexCache::iterator iter = textures_by_address.lower_bound(entry_to_update->addr);
  TexCache::iterator iterend = textures_by_address.upper_bound(entry_to_update->addr + entry_to_update->size_in_bytes);
  while (iter != iterend)
  {
    TCacheEntryBase* entry = iter->second;
    if (entry != entry_to_update &&
        entry->IsEfbCopy() &&
        entry->OverlapsMemoryRange(entry_to_update->addr, entry_to_update->size_in_bytes) &&
        entry->frameCount == FRAMECOUNT_INVALID &&
        entry->memory_stride == numBlocksX * block_size)
    {
      if (entry->hash == entry->CalculateHash())
      {
        u32 src_x, src_y, dst_x, dst_y;

        // Note for understanding the math:
        // Normal textures can't be strided, so the 2 missing cases with src_x > 0 don't exist
        if (entry->addr >= entry_to_update->addr)
        {
          u32 block_offset = (entry->addr - entry_to_update->addr) / block_size;
          u32 block_x = block_offset % numBlocksX;
          u32 block_y = block_offset / numBlocksX;
          src_x = 0;
          src_y = 0;
          dst_x = block_x * block_width;
          dst_y = block_y * block_height;
        }
        else
        {
          u32 block_offset = (entry_to_update->addr - entry->addr) / block_size;
          u32 block_x = (~block_offset + 1) % numBlocksX;
          u32 block_y = (block_offset + block_x) / numBlocksX;
          src_x = 0;
          src_y = block_y * block_height;
          dst_x = block_x * block_width;
          dst_y = 0;
        }

        u32 copy_width = std::min(entry->native_width - src_x, entry_to_update->native_width - dst_x);
        u32 copy_height = std::min(entry->native_height - src_y, entry_to_update->native_height - dst_y);

        // If one of the textures is scaled, scale both with the current efb scaling factor
        if (entry_to_update->native_width != entry_to_update->config.width ||
            entry_to_update->native_height != entry_to_update->config.height ||
            entry->native_width != entry->config.width || entry->native_height != entry->config.height)
        {
          ScaleTextureCacheEntryTo(&entry_to_update,
                                   Renderer::EFBToScaledX(entry_to_update->native_width),
                                   Renderer::EFBToScaledY(entry_to_update->native_height));
          ScaleTextureCacheEntryTo(&entry,
                                   Renderer::EFBToScaledX(entry->native_width),
                                   Renderer::EFBToScaledY(entry->native_height));

          src_x = Renderer::EFBToScaledX(src_x);
          src_y = Renderer::EFBToScaledY(src_y);
          dst_x = Renderer::EFBToScaledX(dst_x);
          dst_y = Renderer::EFBToScaledY(dst_y);
          copy_width = Renderer::EFBToScaledX(copy_width);
          copy_height = Renderer::EFBToScaledY(copy_height);
        }

        MathUtil::Rectangle<int> srcrect, dstrect;
        srcrect.left = src_x;
        srcrect.top = src_y;
        srcrect.right = (src_x + copy_width);
        srcrect.bottom = (src_y + copy_height);
        dstrect.left = dst_x;
        dstrect.top = dst_y;
        dstrect.right = (dst_x + copy_width);
        dstrect.bottom = (dst_y + copy_height);
        entry_to_update->CopyRectangleFromTexture(entry, srcrect, dstrect);
        // Mark the texture update as used, so it isn't applied more than once
        entry->frameCount = frameCount;
      }
      else
      {
        // If the hash does not match, this EFB copy will not be used for anything, so remove it
        iter = FreeTexture(iter);
        continue;
      }
    }
    ++iter;
  }
  return entry_to_update;
}
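// Editor's sketch (not part of the original file): copy_width/copy_height above clamp the
// blitted rectangle so it stays inside both the EFB copy (source) and the texture being
// updated (destination), whichever runs out of texels first. Names and numbers are
// illustrative assumptions only.
namespace
{
constexpr u32 SketchCopySpan(u32 src_extent, u32 src_pos, u32 dst_extent, u32 dst_pos)
{
  return (src_extent - src_pos) < (dst_extent - dst_pos) ? (src_extent - src_pos)
                                                         : (dst_extent - dst_pos);
}
// A 64-texel-wide copy pasted at x = 48 of a 96-texel-wide texture contributes only 48 columns.
static_assert(SketchCopySpan(64, 0, 96, 48) == 48, "span limited by the destination width");
}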