TextureCacheBase.cpp 121 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199
  1. // Copyright 2010 Dolphin Emulator Project
  2. // SPDX-License-Identifier: GPL-2.0-or-later
  3. #include "VideoCommon/TextureCacheBase.h"
  4. #include <algorithm>
  5. #include <cmath>
  6. #include <cstring>
  7. #include <memory>
  8. #include <string>
  9. #include <utility>
  10. #include <vector>
  11. #if defined(_M_X86_64)
  12. #include <pmmintrin.h>
  13. #endif
  14. #include <fmt/format.h>
  15. #include "Common/Align.h"
  16. #include "Common/Assert.h"
  17. #include "Common/ChunkFile.h"
  18. #include "Common/CommonTypes.h"
  19. #include "Common/FileUtil.h"
  20. #include "Common/Hash.h"
  21. #include "Common/Logging/Log.h"
  22. #include "Common/MathUtil.h"
  23. #include "Common/MemoryUtil.h"
  24. #include "Core/Config/GraphicsSettings.h"
  25. #include "Core/ConfigManager.h"
  26. #include "Core/FifoPlayer/FifoPlayer.h"
  27. #include "Core/FifoPlayer/FifoRecorder.h"
  28. #include "Core/HW/Memmap.h"
  29. #include "Core/System.h"
  30. #include "VideoCommon/AbstractFramebuffer.h"
  31. #include "VideoCommon/AbstractGfx.h"
  32. #include "VideoCommon/AbstractStagingTexture.h"
  33. #include "VideoCommon/Assets/CustomTextureData.h"
  34. #include "VideoCommon/BPMemory.h"
  35. #include "VideoCommon/FramebufferManager.h"
  36. #include "VideoCommon/GraphicsModSystem/Runtime/FBInfo.h"
  37. #include "VideoCommon/GraphicsModSystem/Runtime/GraphicsModActionData.h"
  38. #include "VideoCommon/GraphicsModSystem/Runtime/GraphicsModManager.h"
  39. #include "VideoCommon/HiresTextures.h"
  40. #include "VideoCommon/OpcodeDecoding.h"
  41. #include "VideoCommon/PixelShaderManager.h"
  42. #include "VideoCommon/Present.h"
  43. #include "VideoCommon/ShaderCache.h"
  44. #include "VideoCommon/Statistics.h"
  45. #include "VideoCommon/TMEM.h"
  46. #include "VideoCommon/TextureConversionShader.h"
  47. #include "VideoCommon/TextureConverterShaderGen.h"
  48. #include "VideoCommon/TextureDecoder.h"
  49. #include "VideoCommon/VertexManagerBase.h"
  50. #include "VideoCommon/VideoCommon.h"
  51. #include "VideoCommon/VideoConfig.h"
  52. static const u64 TEXHASH_INVALID = 0;
  53. // Sonic the Fighters (inside Sonic Gems Collection) loops a 64 frames animation
  54. static const int TEXTURE_KILL_THRESHOLD = 64;
  55. static const int TEXTURE_POOL_KILL_THRESHOLD = 3;
  56. static int xfb_count = 0;
  57. std::unique_ptr<TextureCacheBase> g_texture_cache;
// Takes ownership of the backing GPU texture and (optionally) its render
// framebuffer. fb may be null for entries that are never rendered to.
TCacheEntry::TCacheEntry(std::unique_ptr<AbstractTexture> tex,
                         std::unique_ptr<AbstractFramebuffer> fb)
    : texture(std::move(tex)), framebuffer(std::move(fb))
{
}
  63. TCacheEntry::~TCacheEntry()
  64. {
  65. for (auto& reference : references)
  66. reference->references.erase(this);
  67. ASSERT_MSG(VIDEO, g_texture_cache, "Texture cache destroyed before TCacheEntry was destroyed");
  68. g_texture_cache->ReleaseToPool(this);
  69. }
  70. void TextureCacheBase::CheckTempSize(size_t required_size)
  71. {
  72. if (required_size <= m_temp_size)
  73. return;
  74. m_temp_size = required_size;
  75. Common::FreeAlignedMemory(m_temp);
  76. m_temp = static_cast<u8*>(Common::AllocateAlignedMemory(m_temp_size, 16));
  77. }
  78. TextureCacheBase::TextureCacheBase()
  79. {
  80. SetBackupConfig(g_ActiveConfig);
  81. m_temp_size = 2048 * 2048 * 4;
  82. m_temp = static_cast<u8*>(Common::AllocateAlignedMemory(m_temp_size, 16));
  83. TexDecoder_SetTexFmtOverlayOptions(m_backup_config.texfmt_overlay,
  84. m_backup_config.texfmt_overlay_center);
  85. HiresTexture::Init();
  86. TMEM::InvalidateAll();
  87. }
// Tears down GPU-side cache state. Must run while the GPU context is still
// alive; the destructor afterwards only frees CPU-side scratch memory.
void TextureCacheBase::Shutdown()
{
  // Clear pending EFB copies first, so we don't try to flush them.
  m_pending_efb_copies.clear();

  HiresTexture::Shutdown();

  // For correctness, we need to invalidate textures before the gpu context starts shutting down.
  Invalidate();
}
  96. TextureCacheBase::~TextureCacheBase()
  97. {
  98. Common::FreeAlignedMemory(m_temp);
  99. m_temp = nullptr;
  100. }
  101. bool TextureCacheBase::Initialize()
  102. {
  103. if (!CreateUtilityTextures())
  104. {
  105. PanicAlertFmt("Failed to create utility textures.");
  106. return false;
  107. }
  108. return true;
  109. }
  110. void TextureCacheBase::Invalidate()
  111. {
  112. FlushEFBCopies();
  113. TMEM::InvalidateAll();
  114. for (auto& bind : m_bound_textures)
  115. bind.reset();
  116. m_textures_by_hash.clear();
  117. m_textures_by_address.clear();
  118. m_texture_pool.clear();
  119. }
  120. void TextureCacheBase::OnConfigChanged(const VideoConfig& config)
  121. {
  122. if (config.bHiresTextures != m_backup_config.hires_textures ||
  123. config.bCacheHiresTextures != m_backup_config.cache_hires_textures)
  124. {
  125. HiresTexture::Update();
  126. }
  127. const u32 change_count =
  128. config.graphics_mod_config ? config.graphics_mod_config->GetChangeCount() : 0;
  129. // TODO: Invalidating texcache is really stupid in some of these cases
  130. if (config.iSafeTextureCache_ColorSamples != m_backup_config.color_samples ||
  131. config.bTexFmtOverlayEnable != m_backup_config.texfmt_overlay ||
  132. config.bTexFmtOverlayCenter != m_backup_config.texfmt_overlay_center ||
  133. config.bHiresTextures != m_backup_config.hires_textures ||
  134. config.bEnableGPUTextureDecoding != m_backup_config.gpu_texture_decoding ||
  135. config.bDisableCopyToVRAM != m_backup_config.disable_vram_copies ||
  136. config.bArbitraryMipmapDetection != m_backup_config.arbitrary_mipmap_detection ||
  137. config.bGraphicMods != m_backup_config.graphics_mods ||
  138. change_count != m_backup_config.graphics_mod_change_count)
  139. {
  140. Invalidate();
  141. TexDecoder_SetTexFmtOverlayOptions(config.bTexFmtOverlayEnable, config.bTexFmtOverlayCenter);
  142. }
  143. SetBackupConfig(config);
  144. }
// Per-frame garbage collection: evicts cache entries unused for
// TEXTURE_KILL_THRESHOLD frames and pooled textures unused for
// TEXTURE_POOL_KILL_THRESHOLD frames. _frameCount is the current frame number.
void TextureCacheBase::Cleanup(int _frameCount)
{
  TexAddrCache::iterator iter = m_textures_by_address.begin();
  // Erasing map elements does not invalidate the end iterator, so it is safe
  // to cache it for the whole loop.
  TexAddrCache::iterator tcend = m_textures_by_address.end();
  while (iter != tcend)
  {
    if (iter->second->frameCount == FRAMECOUNT_INVALID)
    {
      // Entry was never stamped with a frame number; stamp it now so it ages
      // normally from this point on.
      iter->second->frameCount = _frameCount;
      ++iter;
    }
    else if (_frameCount > TEXTURE_KILL_THRESHOLD + iter->second->frameCount)
    {
      if (iter->second->IsCopy())
      {
        // Only remove EFB copies when they wouldn't be used anymore (changed hash), because EFB
        // copies living on the host GPU are unrecoverable. Perform this check only every
        // TEXTURE_KILL_THRESHOLD frames for performance reasons.
        if ((_frameCount - iter->second->frameCount) % TEXTURE_KILL_THRESHOLD == 1 &&
            iter->second->hash != iter->second->CalculateHash())
        {
          iter = InvalidateTexture(iter);
        }
        else
        {
          ++iter;
        }
      }
      else
      {
        // Non-copies can always be re-decoded from RAM, so evict freely.
        iter = InvalidateTexture(iter);
      }
    }
    else
    {
      ++iter;
    }
  }

  // Age out pooled (unused) textures as well, on a much shorter fuse.
  TexPool::iterator iter2 = m_texture_pool.begin();
  TexPool::iterator tcend2 = m_texture_pool.end();
  while (iter2 != tcend2)
  {
    if (iter2->second.frameCount == FRAMECOUNT_INVALID)
    {
      iter2->second.frameCount = _frameCount;
    }
    if (_frameCount > TEXTURE_POOL_KILL_THRESHOLD + iter2->second.frameCount)
    {
      iter2 = m_texture_pool.erase(iter2);
    }
    else
    {
      ++iter2;
    }
  }
}
  202. bool TCacheEntry::OverlapsMemoryRange(u32 range_address, u32 range_size) const
  203. {
  204. if (addr + size_in_bytes <= range_address)
  205. return false;
  206. if (addr >= range_address + range_size)
  207. return false;
  208. return true;
  209. }
// Snapshots every video-config field the texture cache depends on, so that a
// later OnConfigChanged() can detect which of these settings changed.
void TextureCacheBase::SetBackupConfig(const VideoConfig& config)
{
  m_backup_config.color_samples = config.iSafeTextureCache_ColorSamples;
  m_backup_config.texfmt_overlay = config.bTexFmtOverlayEnable;
  m_backup_config.texfmt_overlay_center = config.bTexFmtOverlayCenter;
  m_backup_config.hires_textures = config.bHiresTextures;
  m_backup_config.cache_hires_textures = config.bCacheHiresTextures;
  m_backup_config.stereo_3d = config.stereo_mode != StereoMode::Off;
  m_backup_config.efb_mono_depth = config.bStereoEFBMonoDepth;
  m_backup_config.gpu_texture_decoding = config.bEnableGPUTextureDecoding;
  m_backup_config.disable_vram_copies = config.bDisableCopyToVRAM;
  m_backup_config.arbitrary_mipmap_detection = config.bArbitraryMipmapDetection;
  m_backup_config.graphics_mods = config.bGraphicMods;
  // Zero when no graphics-mod configuration is active.
  m_backup_config.graphics_mod_change_count =
      config.graphics_mod_config ? config.graphics_mod_config->GetChangeCount() : 0;
}
  226. bool TextureCacheBase::DidLinkedAssetsChange(const TCacheEntry& entry)
  227. {
  228. for (const auto& cached_asset : entry.linked_game_texture_assets)
  229. {
  230. if (cached_asset.m_asset)
  231. {
  232. if (cached_asset.m_asset->GetLastLoadedTime() > cached_asset.m_cached_write_time)
  233. return true;
  234. }
  235. }
  236. for (const auto& cached_asset : entry.linked_asset_dependencies)
  237. {
  238. if (cached_asset.m_asset)
  239. {
  240. if (cached_asset.m_asset->GetLastLoadedTime() > cached_asset.m_cached_write_time)
  241. return true;
  242. }
  243. }
  244. return false;
  245. }
// Creates a new cache entry by running the palette (TLUT) conversion pipeline
// over `entry`, producing a de-palettized texture. Returns an empty
// RcTcacheEntry on pipeline/allocation failure. On success the new entry is
// inserted into m_textures_by_address and returned.
RcTcacheEntry TextureCacheBase::ApplyPaletteToEntry(RcTcacheEntry& entry, const u8* palette,
                                                    TLUTFormat tlutfmt)
{
  DEBUG_ASSERT(g_ActiveConfig.backend_info.bSupportsPaletteConversion);

  const AbstractPipeline* pipeline = g_shader_cache->GetPaletteConversionPipeline(tlutfmt);
  if (!pipeline)
  {
    ERROR_LOG_FMT(VIDEO, "Failed to get conversion pipeline for format {}", tlutfmt);
    return {};
  }

  // The conversion is performed as a draw, so the destination must be a
  // single-level render target with the source's dimensions.
  TextureConfig new_config = entry->texture->GetConfig();
  new_config.levels = 1;
  new_config.flags |= AbstractTextureFlag_RenderTarget;

  RcTcacheEntry decoded_entry = AllocateCacheEntry(new_config);
  if (!decoded_entry)
    return decoded_entry;

  // The decoded entry mirrors the source's identity (address/size/hashes) but
  // is not itself a copy and must not use forced safe hashing.
  decoded_entry->SetGeneralParameters(entry->addr, entry->size_in_bytes, entry->format,
                                      entry->should_force_safe_hashing);
  decoded_entry->SetDimensions(entry->native_width, entry->native_height, 1);
  decoded_entry->SetHashes(entry->base_hash, entry->hash);
  decoded_entry->frameCount = FRAMECOUNT_INVALID;
  decoded_entry->should_force_safe_hashing = false;
  decoded_entry->SetNotCopy();
  decoded_entry->may_have_overlapping_textures = entry->may_have_overlapping_textures;

  g_gfx->BeginUtilityDrawing();

  // 32-byte palette for I4 textures, full 512-byte table otherwise.
  const u32 palette_size = entry->format == TextureFormat::I4 ? 32 : 512;
  u32 texel_buffer_offset;
  if (g_vertex_manager->UploadTexelBuffer(palette, palette_size,
                                          TexelBufferFormat::TEXEL_BUFFER_FORMAT_R16_UINT,
                                          &texel_buffer_offset))
  {
    // Layout must match the conversion shader's uniform block.
    struct Uniforms
    {
      float multiplier;
      u32 texel_buffer_offset;
      u32 pad[2];
    };
    static_assert(std::is_standard_layout<Uniforms>::value);
    Uniforms uniforms = {};
    // Scale factor from normalized texel values back to palette indices:
    // 15 for 4-bit (I4), 255 for 8-bit formats.
    uniforms.multiplier = entry->format == TextureFormat::I4 ? 15.0f : 255.0f;
    uniforms.texel_buffer_offset = texel_buffer_offset;
    g_vertex_manager->UploadUtilityUniforms(&uniforms, sizeof(uniforms));

    // Fullscreen-triangle draw: source bound at slot 1, output to the new
    // entry's framebuffer.
    g_gfx->SetAndDiscardFramebuffer(decoded_entry->framebuffer.get());
    g_gfx->SetViewportAndScissor(decoded_entry->texture->GetRect());
    g_gfx->SetPipeline(pipeline);
    g_gfx->SetTexture(1, entry->texture.get());
    g_gfx->SetSamplerState(1, RenderState::GetPointSamplerState());
    g_gfx->Draw(0, 3);
    g_gfx->EndUtilityDrawing();
    decoded_entry->texture->FinishedRendering();
  }
  else
  {
    ERROR_LOG_FMT(VIDEO, "Texel buffer upload of {} bytes failed", palette_size);
    g_gfx->EndUtilityDrawing();
  }

  m_textures_by_address.emplace(decoded_entry->addr, decoded_entry);

  return decoded_entry;
}
  305. RcTcacheEntry TextureCacheBase::ReinterpretEntry(const RcTcacheEntry& existing_entry,
  306. TextureFormat new_format)
  307. {
  308. const AbstractPipeline* pipeline =
  309. g_shader_cache->GetTextureReinterpretPipeline(existing_entry->format.texfmt, new_format);
  310. if (!pipeline)
  311. {
  312. ERROR_LOG_FMT(VIDEO, "Failed to obtain texture reinterpreting pipeline from format {} to {}",
  313. existing_entry->format.texfmt, new_format);
  314. return {};
  315. }
  316. TextureConfig new_config = existing_entry->texture->GetConfig();
  317. new_config.levels = 1;
  318. new_config.flags |= AbstractTextureFlag_RenderTarget;
  319. RcTcacheEntry reinterpreted_entry = AllocateCacheEntry(new_config);
  320. if (!reinterpreted_entry)
  321. return {};
  322. reinterpreted_entry->SetGeneralParameters(existing_entry->addr, existing_entry->size_in_bytes,
  323. new_format, existing_entry->should_force_safe_hashing);
  324. reinterpreted_entry->SetDimensions(existing_entry->native_width, existing_entry->native_height,
  325. 1);
  326. reinterpreted_entry->SetHashes(existing_entry->base_hash, existing_entry->hash);
  327. reinterpreted_entry->frameCount = existing_entry->frameCount;
  328. reinterpreted_entry->SetNotCopy();
  329. reinterpreted_entry->is_efb_copy = existing_entry->is_efb_copy;
  330. reinterpreted_entry->may_have_overlapping_textures =
  331. existing_entry->may_have_overlapping_textures;
  332. g_gfx->BeginUtilityDrawing();
  333. g_gfx->SetAndDiscardFramebuffer(reinterpreted_entry->framebuffer.get());
  334. g_gfx->SetViewportAndScissor(reinterpreted_entry->texture->GetRect());
  335. g_gfx->SetPipeline(pipeline);
  336. g_gfx->SetTexture(0, existing_entry->texture.get());
  337. g_gfx->SetSamplerState(1, RenderState::GetPointSamplerState());
  338. g_gfx->Draw(0, 3);
  339. g_gfx->EndUtilityDrawing();
  340. reinterpreted_entry->texture->FinishedRendering();
  341. m_textures_by_address.emplace(reinterpreted_entry->addr, reinterpreted_entry);
  342. return reinterpreted_entry;
  343. }
// Rescales entry's backing texture to new_width x new_height. On success,
// entry's texture and framebuffer are swapped for scaled replacements and the
// old texture is returned to the pool for reuse. No-op when the size already
// matches; logs and bails when the target exceeds backend limits or
// allocation fails.
void TextureCacheBase::ScaleTextureCacheEntryTo(RcTcacheEntry& entry, u32 new_width, u32 new_height)
{
  if (entry->GetWidth() == new_width && entry->GetHeight() == new_height)
  {
    return;
  }

  const u32 max = g_ActiveConfig.backend_info.MaxTextureSize;
  if (max < new_width || max < new_height)
  {
    ERROR_LOG_FMT(VIDEO, "Texture too big, width = {}, height = {}", new_width, new_height);
    return;
  }

  // The scaled texture is always RGBA8 and renderable, since scaling is done
  // on the GPU.
  const TextureConfig newconfig(new_width, new_height, 1, entry->GetNumLayers(), 1,
                                AbstractTextureFormat::RGBA8, AbstractTextureFlag_RenderTarget,
                                AbstractTextureType::Texture_2DArray);
  std::optional<TexPoolEntry> new_texture = AllocateTexture(newconfig);
  if (!new_texture)
  {
    ERROR_LOG_FMT(VIDEO, "Scaling failed due to texture allocation failure");
    return;
  }

  // No need to convert the coordinates here since they'll be the same.
  g_gfx->ScaleTexture(new_texture->framebuffer.get(), new_texture->texture->GetConfig().GetRect(),
                      entry->texture.get(), entry->texture->GetConfig().GetRect());
  entry->texture.swap(new_texture->texture);
  entry->framebuffer.swap(new_texture->framebuffer);

  // At this point new_texture has the old texture in it,
  // we can potentially reuse this, so let's move it back to the pool
  auto config = new_texture->texture->GetConfig();
  m_texture_pool.emplace(
      config, TexPoolEntry(std::move(new_texture->texture), std::move(new_texture->framebuffer)));
}
  376. bool TextureCacheBase::CheckReadbackTexture(u32 width, u32 height, AbstractTextureFormat format)
  377. {
  378. if (m_readback_texture && m_readback_texture->GetConfig().width >= width &&
  379. m_readback_texture->GetConfig().height >= height &&
  380. m_readback_texture->GetConfig().format == format)
  381. {
  382. return true;
  383. }
  384. TextureConfig staging_config(std::max(width, 128u), std::max(height, 128u), 1, 1, 1, format, 0,
  385. AbstractTextureType::Texture_2DArray);
  386. m_readback_texture.reset();
  387. m_readback_texture = g_gfx->CreateStagingTexture(StagingTextureType::Readback, staging_config);
  388. return m_readback_texture != nullptr;
  389. }
// Writes tex's contents (all layers and mip levels) into the save state via
// p. In measure mode only the space requirement is recorded; in write mode
// the texels are read back from the GPU through m_readback_texture.
void TextureCacheBase::SerializeTexture(AbstractTexture* tex, const TextureConfig& config,
                                        PointerWrap& p)
{
  // If we're in measure mode, skip the actual readback to save some time.
  const bool skip_readback = p.IsMeasureMode();

  p.Do(config);

  if (skip_readback || CheckReadbackTexture(config.width, config.height, config.format))
  {
    // First, measure the amount of memory needed.
    u32 total_size = 0;
    for (u32 layer = 0; layer < config.layers; layer++)
    {
      for (u32 level = 0; level < config.levels; level++)
      {
        // Mip dimensions halve per level, clamped to 1.
        u32 level_width = std::max(config.width >> level, 1u);
        u32 level_height = std::max(config.height >> level, 1u);

        u32 stride = AbstractTexture::CalculateStrideForFormat(config.format, level_width);
        u32 size = stride * level_height;

        total_size += size;
      }
    }

    // Set aside total_size bytes of space for the textures.
    // When measuring, this will be set aside and not written to,
    // but when writing we'll use this pointer directly to avoid
    // needing to allocate/free an extra buffer.
    u8* texture_data = p.DoExternal(total_size);

    // NOTE(review): since skip_readback was captured before DoExternal(),
    // this branch only fires if DoExternal() switched the PointerWrap into
    // measure mode — presumably its failure signal; confirm against
    // PointerWrap::DoExternal.
    if (!skip_readback && p.IsMeasureMode())
    {
      ERROR_LOG_FMT(VIDEO, "Couldn't acquire {} bytes for serializing texture.", total_size);
      return;
    }

    if (!skip_readback)
    {
      // Save out each layer of the texture to the pointer.
      for (u32 layer = 0; layer < config.layers; layer++)
      {
        for (u32 level = 0; level < config.levels; level++)
        {
          u32 level_width = std::max(config.width >> level, 1u);
          u32 level_height = std::max(config.height >> level, 1u);
          auto rect = tex->GetConfig().GetMipRect(level);
          // Stage the mip level on the CPU-visible readback texture, then
          // copy its texels directly into the save-state buffer.
          m_readback_texture->CopyFromTexture(tex, rect, layer, level, rect);

          u32 stride = AbstractTexture::CalculateStrideForFormat(config.format, level_width);
          u32 size = stride * level_height;
          m_readback_texture->ReadTexels(rect, texture_data, stride);
          texture_data += size;
        }
      }
    }
  }
  else
  {
    PanicAlertFmt("Failed to create staging texture for serialization");
  }
}
// Reads a texture previously written by SerializeTexture() back out of the
// save state. Returns std::nullopt when not in read mode, when no data is
// present, or when allocation fails; on truncated data it returns the
// partially-populated texture rather than reading past the buffer.
std::optional<TextureCacheBase::TexPoolEntry> TextureCacheBase::DeserializeTexture(PointerWrap& p)
{
  TextureConfig config;
  p.Do(config);

  // Read in the size from the save state, then texture data will point to
  // a region of size total_size where textures are stored.
  u32 total_size = 0;
  u8* texture_data = p.DoExternal(total_size);

  if (!p.IsReadMode() || total_size == 0)
    return std::nullopt;

  auto tex = AllocateTexture(config);
  if (!tex)
  {
    PanicAlertFmt("Failed to create texture for deserialization");
    return std::nullopt;
  }

  // Upload each layer/level sequentially from the state buffer, mirroring the
  // order SerializeTexture() wrote them in.
  size_t start = 0;
  for (u32 layer = 0; layer < config.layers; layer++)
  {
    for (u32 level = 0; level < config.levels; level++)
    {
      const u32 level_width = std::max(config.width >> level, 1u);
      const u32 level_height = std::max(config.height >> level, 1u);
      const size_t stride = AbstractTexture::CalculateStrideForFormat(config.format, level_width);
      const size_t size = stride * level_height;
      if ((start + size) > total_size)
      {
        // Truncated state: stop here with whatever has been uploaded so far.
        ERROR_LOG_FMT(VIDEO, "Insufficient texture data for layer {} level {}", layer, level);
        return tex;
      }

      tex->texture->Load(level, level_width, level_height, level_width, &texture_data[start], size);
      start += size;
    }
  }

  return tex;
}
  481. void TextureCacheBase::DoState(PointerWrap& p)
  482. {
  483. // Flush all pending XFB copies before either loading or saving.
  484. FlushEFBCopies();
  485. p.Do(m_last_entry_id);
  486. if (p.IsWriteMode() || p.IsMeasureMode())
  487. DoSaveState(p);
  488. else
  489. DoLoadState(p);
  490. }
  491. void TextureCacheBase::DoSaveState(PointerWrap& p)
  492. {
  493. // Flush all stale binds
  494. FlushStaleBinds();
  495. std::map<const TCacheEntry*, u32> entry_map;
  496. std::vector<TCacheEntry*> entries_to_save;
  497. auto ShouldSaveEntry = [](const RcTcacheEntry& entry) {
  498. // We skip non-copies as they can be decoded from RAM when the state is loaded.
  499. // Storing them would duplicate data in the save state file, adding to decompression time.
  500. // We also need to store invalidated entires, as they can't be restored from RAM.
  501. return entry->IsCopy() || entry->invalidated;
  502. };
  503. auto AddCacheEntryToMap = [&entry_map, &entries_to_save](const RcTcacheEntry& entry) -> u32 {
  504. auto iter = entry_map.find(entry.get());
  505. if (iter != entry_map.end())
  506. return iter->second;
  507. // Since we are sequentially allocating texture entries, we need to save the textures in the
  508. // same order they were collected. This is because of iterating both the address and hash maps.
  509. // Therefore, the map is used for fast lookup, and the vector for ordering.
  510. u32 id = static_cast<u32>(entry_map.size());
  511. entry_map.emplace(entry.get(), id);
  512. entries_to_save.push_back(entry.get());
  513. return id;
  514. };
  515. auto GetCacheEntryId = [&entry_map](const TCacheEntry* entry) -> std::optional<u32> {
  516. auto iter = entry_map.find(entry);
  517. return iter != entry_map.end() ? std::make_optional(iter->second) : std::nullopt;
  518. };
  519. // Transform the m_textures_by_address and m_textures_by_hash maps to a mapping
  520. // of address/hash to entry ID.
  521. std::vector<std::pair<u32, u32>> textures_by_address_list;
  522. std::vector<std::pair<u64, u32>> textures_by_hash_list;
  523. std::vector<std::pair<u32, u32>> bound_textures_list;
  524. if (Config::Get(Config::GFX_SAVE_TEXTURE_CACHE_TO_STATE))
  525. {
  526. for (const auto& it : m_textures_by_address)
  527. {
  528. if (ShouldSaveEntry(it.second))
  529. {
  530. const u32 id = AddCacheEntryToMap(it.second);
  531. textures_by_address_list.emplace_back(it.first, id);
  532. }
  533. }
  534. for (const auto& it : m_textures_by_hash)
  535. {
  536. if (ShouldSaveEntry(it.second))
  537. {
  538. const u32 id = AddCacheEntryToMap(it.second);
  539. textures_by_hash_list.emplace_back(it.first, id);
  540. }
  541. }
  542. for (u32 i = 0; i < m_bound_textures.size(); i++)
  543. {
  544. const auto& tentry = m_bound_textures[i];
  545. if (m_bound_textures[i] && ShouldSaveEntry(tentry))
  546. {
  547. const u32 id = AddCacheEntryToMap(tentry);
  548. bound_textures_list.emplace_back(i, id);
  549. }
  550. }
  551. }
  552. // Save the texture cache entries out in the order the were referenced.
  553. u32 size = static_cast<u32>(entries_to_save.size());
  554. p.Do(size);
  555. for (TCacheEntry* entry : entries_to_save)
  556. {
  557. SerializeTexture(entry->texture.get(), entry->texture->GetConfig(), p);
  558. entry->DoState(p);
  559. }
  560. p.DoMarker("TextureCacheEntries");
  561. // Save references for each cache entry.
  562. // As references are circular, we need to have everything created before linking entries.
  563. std::set<std::pair<u32, u32>> reference_pairs;
  564. for (const auto& it : entry_map)
  565. {
  566. const TCacheEntry* entry = it.first;
  567. auto id1 = GetCacheEntryId(entry);
  568. if (!id1)
  569. continue;
  570. for (const TCacheEntry* referenced_entry : entry->references)
  571. {
  572. auto id2 = GetCacheEntryId(referenced_entry);
  573. if (!id2)
  574. continue;
  575. auto refpair1 = std::make_pair(*id1, *id2);
  576. auto refpair2 = std::make_pair(*id2, *id1);
  577. if (!reference_pairs.contains(refpair1) && !reference_pairs.contains(refpair2))
  578. reference_pairs.insert(refpair1);
  579. }
  580. }
  581. auto doList = [&p](auto list) {
  582. u32 list_size = static_cast<u32>(list.size());
  583. p.Do(list_size);
  584. for (const auto& it : list)
  585. {
  586. p.Do(it.first);
  587. p.Do(it.second);
  588. }
  589. };
  590. doList(reference_pairs);
  591. doList(textures_by_address_list);
  592. doList(textures_by_hash_list);
  593. doList(bound_textures_list);
  594. // Free the readback texture to potentially save host-mapped GPU memory, depending on where
  595. // the driver mapped the staging buffer.
  596. m_readback_texture.reset();
  597. }
  598. void TextureCacheBase::DoLoadState(PointerWrap& p)
  599. {
  600. // Helper for getting a cache entry from an ID.
  601. std::map<u32, RcTcacheEntry> id_map;
  602. RcTcacheEntry null_entry;
  603. auto GetEntry = [&id_map, &null_entry](u32 id) -> RcTcacheEntry& {
  604. auto iter = id_map.find(id);
  605. return iter == id_map.end() ? null_entry : iter->second;
  606. };
  607. // Only clear out state when actually restoring/loading.
  608. // Since we throw away entries when not in loading mode now, we don't need to check
  609. // before inserting entries into the cache, as GetEntry will always return null.
  610. const bool commit_state = p.IsReadMode();
  611. if (commit_state)
  612. Invalidate();
  613. // Preload all cache entries.
  614. u32 size = 0;
  615. p.Do(size);
  616. for (u32 i = 0; i < size; i++)
  617. {
  618. // Even if the texture isn't valid, we still need to create the cache entry object
  619. // to update the point in the state state. We'll just throw it away if it's invalid.
  620. auto tex = DeserializeTexture(p);
  621. auto entry =
  622. std::make_shared<TCacheEntry>(std::move(tex->texture), std::move(tex->framebuffer));
  623. entry->textures_by_hash_iter = m_textures_by_hash.end();
  624. entry->DoState(p);
  625. if (entry->texture && commit_state)
  626. id_map.emplace(i, entry);
  627. }
  628. p.DoMarker("TextureCacheEntries");
  629. // Link all cache entry references.
  630. p.Do(size);
  631. for (u32 i = 0; i < size; i++)
  632. {
  633. u32 id1 = 0, id2 = 0;
  634. p.Do(id1);
  635. p.Do(id2);
  636. auto e1 = GetEntry(id1);
  637. auto e2 = GetEntry(id2);
  638. if (e1 && e2)
  639. e1->CreateReference(e2.get());
  640. }
  641. // Fill in address map.
  642. p.Do(size);
  643. for (u32 i = 0; i < size; i++)
  644. {
  645. u32 addr = 0;
  646. u32 id = 0;
  647. p.Do(addr);
  648. p.Do(id);
  649. auto& entry = GetEntry(id);
  650. if (entry)
  651. m_textures_by_address.emplace(addr, entry);
  652. }
  653. // Fill in hash map.
  654. p.Do(size);
  655. for (u32 i = 0; i < size; i++)
  656. {
  657. u64 hash = 0;
  658. u32 id = 0;
  659. p.Do(hash);
  660. p.Do(id);
  661. auto& entry = GetEntry(id);
  662. if (entry)
  663. entry->textures_by_hash_iter = m_textures_by_hash.emplace(hash, entry);
  664. }
  665. // Clear bound textures
  666. for (u32 i = 0; i < m_bound_textures.size(); i++)
  667. m_bound_textures[i].reset();
  668. // Fill in bound textures
  669. p.Do(size);
  670. for (u32 i = 0; i < size; i++)
  671. {
  672. u32 index = 0;
  673. u32 id = 0;
  674. p.Do(index);
  675. p.Do(id);
  676. auto& entry = GetEntry(id);
  677. if (entry)
  678. m_bound_textures[index] = entry;
  679. }
  680. }
  681. void TextureCacheBase::OnFrameEnd()
  682. {
  683. // Flush any outstanding EFB copies to RAM, in case the game is running at an uncapped frame
  684. // rate and not waiting for vblank. Otherwise, we'd end up with a huge list of pending
  685. // copies.
  686. FlushEFBCopies();
  687. Cleanup(g_presenter->FrameCount());
  688. }
// Serializes (or deserializes) the bookkeeping fields of a single cache
// entry. The texture contents themselves are handled separately via
// SerializeTexture/DeserializeTexture. The order of the p.Do() calls defines
// the save state layout, so it must not be changed.
void TCacheEntry::DoState(PointerWrap& p)
{
  p.Do(addr);
  p.Do(size_in_bytes);
  p.Do(base_hash);
  p.Do(hash);
  p.Do(format);
  p.Do(memory_stride);
  p.Do(is_efb_copy);
  p.Do(is_custom_tex);
  p.Do(may_have_overlapping_textures);
  p.Do(invalidated);
  p.Do(has_arbitrary_mips);
  p.Do(should_force_safe_hashing);
  p.Do(is_xfb_copy);
  p.Do(is_xfb_container);
  p.Do(id);
  p.Do(reference_changed);
  p.Do(native_width);
  p.Do(native_height);
  p.Do(native_levels);
  p.Do(frameCount);
}
// Applies any overlapping, not-yet-applied EFB copies onto entry_to_update by
// copying the matching rectangles on the GPU. May rescale both textures to the
// current EFB scale first. Returns the updated entry, or an empty pointer when
// the entry is locked (updates of locked textures are unimplemented).
RcTcacheEntry TextureCacheBase::DoPartialTextureUpdates(RcTcacheEntry& entry_to_update,
                                                        const u8* palette, TLUTFormat tlutfmt)
{
  // If the flag may_have_overlapping_textures is cleared, there are no overlapping EFB copies,
  // which aren't applied already. It is set for new textures, and for the affected range
  // on each EFB copy.
  if (!entry_to_update->may_have_overlapping_textures)
    return entry_to_update;
  entry_to_update->may_have_overlapping_textures = false;

  const bool isPaletteTexture = IsColorIndexed(entry_to_update->format.texfmt);

  // EFB copies are excluded from these updates, until there's an example where a game would
  // benefit from updating. This would require more work to be done.
  if (entry_to_update->IsCopy())
    return entry_to_update;

  if (entry_to_update->IsLocked())
  {
    // TODO: Shouldn't be too hard, just need to clone the texture entry + texture contents.
    PanicAlertFmt("TextureCache: PartialTextureUpdates of locked textures is not implemented");
    return {};
  }

  // Block geometry of the target texture, used to translate address deltas
  // into texel coordinates below.
  u32 block_width = TexDecoder_GetBlockWidthInTexels(entry_to_update->format.texfmt);
  u32 block_height = TexDecoder_GetBlockHeightInTexels(entry_to_update->format.texfmt);
  u32 block_size = block_width * block_height *
                   TexDecoder_GetTexelSizeInNibbles(entry_to_update->format.texfmt) / 2;

  u32 numBlocksX = (entry_to_update->native_width + block_width - 1) / block_width;

  auto iter = FindOverlappingTextures(entry_to_update->addr, entry_to_update->size_in_bytes);
  while (iter.first != iter.second)
  {
    auto& entry = iter.first->second;
    // Only consider EFB copies that truly overlap, were not already applied
    // (no existing reference), and share the target's row stride.
    if (entry != entry_to_update && entry->IsCopy() &&
        !entry->references.contains(entry_to_update.get()) &&
        entry->OverlapsMemoryRange(entry_to_update->addr, entry_to_update->size_in_bytes) &&
        entry->memory_stride == numBlocksX * block_size)
    {
      if (entry->hash == entry->CalculateHash())
      {
        // If the texture formats are not compatible or convertible, skip it.
        if (!IsCompatibleTextureFormat(entry_to_update->format.texfmt, entry->format.texfmt))
        {
          if (!CanReinterpretTextureOnGPU(entry_to_update->format.texfmt, entry->format.texfmt))
          {
            ++iter.first;
            continue;
          }

          auto reinterpreted_entry = ReinterpretEntry(entry, entry_to_update->format.texfmt);
          if (reinterpreted_entry)
            entry = reinterpreted_entry;
        }

        if (isPaletteTexture)
        {
          auto decoded_entry = ApplyPaletteToEntry(entry, palette, tlutfmt);
          if (decoded_entry)
          {
            // Link the efb copy with the partially updated texture, so we won't apply this partial
            // update again
            entry->CreateReference(entry_to_update.get());
            // Mark the texture update as used, as if it was loaded directly
            entry->frameCount = FRAMECOUNT_INVALID;
            entry = decoded_entry;
          }
          else
          {
            ++iter.first;
            continue;
          }
        }

        u32 src_x, src_y, dst_x, dst_y;

        // Note for understanding the math:
        // Normal textures can't be strided, so the 2 missing cases with src_x > 0 don't exist
        if (entry->addr >= entry_to_update->addr)
        {
          u32 block_offset = (entry->addr - entry_to_update->addr) / block_size;
          u32 block_x = block_offset % numBlocksX;
          u32 block_y = block_offset / numBlocksX;
          src_x = 0;
          src_y = 0;
          dst_x = block_x * block_width;
          dst_y = block_y * block_height;
        }
        else
        {
          u32 block_offset = (entry_to_update->addr - entry->addr) / block_size;
          u32 block_x = (~block_offset + 1) % numBlocksX;
          u32 block_y = (block_offset + block_x) / numBlocksX;
          src_x = 0;
          src_y = block_y * block_height;
          dst_x = block_x * block_width;
          dst_y = 0;
        }

        u32 copy_width =
            std::min(entry->native_width - src_x, entry_to_update->native_width - dst_x);
        u32 copy_height =
            std::min(entry->native_height - src_y, entry_to_update->native_height - dst_y);

        // If one of the textures is scaled, scale both with the current efb scaling factor
        if (entry_to_update->native_width != entry_to_update->GetWidth() ||
            entry_to_update->native_height != entry_to_update->GetHeight() ||
            entry->native_width != entry->GetWidth() || entry->native_height != entry->GetHeight())
        {
          ScaleTextureCacheEntryTo(
              entry_to_update, g_framebuffer_manager->EFBToScaledX(entry_to_update->native_width),
              g_framebuffer_manager->EFBToScaledY(entry_to_update->native_height));
          ScaleTextureCacheEntryTo(entry, g_framebuffer_manager->EFBToScaledX(entry->native_width),
                                   g_framebuffer_manager->EFBToScaledY(entry->native_height));

          src_x = g_framebuffer_manager->EFBToScaledX(src_x);
          src_y = g_framebuffer_manager->EFBToScaledY(src_y);
          dst_x = g_framebuffer_manager->EFBToScaledX(dst_x);
          dst_y = g_framebuffer_manager->EFBToScaledY(dst_y);
          copy_width = g_framebuffer_manager->EFBToScaledX(copy_width);
          copy_height = g_framebuffer_manager->EFBToScaledY(copy_height);
        }

        // If the source rectangle is outside of what we actually have in VRAM, skip the copy.
        // The backend doesn't do any clamping, so if we don't, we'd pass out-of-range coordinates
        // to the graphics driver, which can cause GPU resets.
        if (static_cast<u32>(src_x + copy_width) > entry->GetWidth() ||
            static_cast<u32>(src_y + copy_height) > entry->GetHeight() ||
            static_cast<u32>(dst_x + copy_width) > entry_to_update->GetWidth() ||
            static_cast<u32>(dst_y + copy_height) > entry_to_update->GetHeight())
        {
          ++iter.first;
          continue;
        }

        MathUtil::Rectangle<int> srcrect, dstrect;
        srcrect.left = src_x;
        srcrect.top = src_y;
        srcrect.right = (src_x + copy_width);
        srcrect.bottom = (src_y + copy_height);
        dstrect.left = dst_x;
        dstrect.top = dst_y;
        dstrect.right = (dst_x + copy_width);
        dstrect.bottom = (dst_y + copy_height);

        // If one copy is stereo, and the other isn't... not much we can do here :/
        const u32 layers_to_copy = std::min(entry->GetNumLayers(), entry_to_update->GetNumLayers());
        for (u32 layer = 0; layer < layers_to_copy; layer++)
        {
          entry_to_update->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer,
                                                             0, dstrect, layer, 0);
        }

        if (isPaletteTexture)
        {
          // Remove the temporary converted texture, it won't be used anywhere else
          // TODO: It would be nice to convert and copy in one step, but this code path isn't common
          iter.first = InvalidateTexture(iter.first);
          continue;
        }
        else
        {
          // Link the two textures together, so we won't apply this partial update again
          entry->CreateReference(entry_to_update.get());
          // Mark the texture update as used, as if it was loaded directly
          entry->frameCount = FRAMECOUNT_INVALID;
        }
      }
      else
      {
        // If the hash does not match, this EFB copy will not be used for anything, so remove it
        iter.first = InvalidateTexture(iter.first);
        continue;
      }
    }
    ++iter.first;
  }

  return entry_to_update;
}
  875. // Helper for checking if a BPMemory TexMode0 register is set to Point
  876. // Filtering modes. This is used to decide whether Anisotropic enhancements
  877. // are (mostly) safe in the VideoBackends.
  878. // If both the minification and magnification filters are set to POINT modes
  879. // then applying anisotropic filtering is equivalent to forced filtering. Point
  880. // mode textures are usually some sort of 2D UI billboard which will end up
  881. // misaligned from the correct pixels when filtered anisotropically.
  882. static bool IsAnisostropicEnhancementSafe(const TexMode0& tm0)
  883. {
  884. return !(tm0.min_filter == FilterMode::Near && tm0.mag_filter == FilterMode::Near);
  885. }
// Builds the sampler state for texture stage `index` from the current BP
// registers, then applies the user's filtering and anisotropy overrides.
// custom_tex / custom_tex_scale / has_arbitrary_mips adjust LOD behaviour for
// replaced textures and for textures with hand-authored mip levels.
SamplerState TextureCacheBase::GetSamplerState(u32 index, float custom_tex_scale, bool custom_tex,
                                               bool has_arbitrary_mips)
{
  const TexMode0& tm0 = bpmem.tex.GetUnit(index).texMode0;

  SamplerState state = {};
  state.Generate(bpmem, index);

  // Force texture filtering config option.
  if (g_ActiveConfig.texture_filtering_mode == TextureFilteringMode::Nearest)
  {
    state.tm0.min_filter = FilterMode::Near;
    state.tm0.mag_filter = FilterMode::Near;
    state.tm0.mipmap_filter = FilterMode::Near;
  }
  else if (g_ActiveConfig.texture_filtering_mode == TextureFilteringMode::Linear)
  {
    state.tm0.min_filter = FilterMode::Linear;
    state.tm0.mag_filter = FilterMode::Linear;
    // Only filter between mip levels when the game actually enabled mipmapping.
    state.tm0.mipmap_filter =
        tm0.mipmap_filter != MipMode::None ? FilterMode::Linear : FilterMode::Near;
  }

  // Custom textures may have a greater number of mips
  if (custom_tex)
    state.tm1.max_lod = 255;

  // Anisotropic filtering option.
  if (g_ActiveConfig.iMaxAnisotropy != 0 && IsAnisostropicEnhancementSafe(tm0))
  {
    // https://www.opengl.org/registry/specs/EXT/texture_filter_anisotropic.txt
    // For predictable results on all hardware/drivers, only use one of:
    // GL_LINEAR + GL_LINEAR (No Mipmaps [Bilinear])
    // GL_LINEAR + GL_LINEAR_MIPMAP_LINEAR (w/ Mipmaps [Trilinear])
    // Letting the game set other combinations will have varying arbitrary results;
    // possibly being interpreted as equal to bilinear/trilinear, implicitly
    // disabling anisotropy, or changing the anisotropic algorithm employed.
    state.tm0.min_filter = FilterMode::Linear;
    state.tm0.mag_filter = FilterMode::Linear;
    if (tm0.mipmap_filter != MipMode::None)
      state.tm0.mipmap_filter = FilterMode::Linear;
    state.tm0.anisotropic_filtering = true;
  }
  else
  {
    state.tm0.anisotropic_filtering = false;
  }

  if (has_arbitrary_mips && tm0.mipmap_filter != MipMode::None)
  {
    // Apply a secondary bias calculated from the IR scale to pull inwards mipmaps
    // that have arbitrary contents, eg. are used for fog effects where the
    // distance they kick in at is important to preserve at any resolution.
    // Correct this with the upscaling factor of custom textures.
    s32 lod_offset = std::log2(g_framebuffer_manager->GetEFBScale() / custom_tex_scale) * 256.f;
    // The bias is expressed in 1/256th LOD units; clamp the sum to the s16 range.
    state.tm0.lod_bias = std::clamp<s32>(state.tm0.lod_bias + lod_offset, -32768, 32767);

    // Anisotropic also pushes mips farther away so it cannot be used either
    state.tm0.anisotropic_filtering = false;
  }

  return state;
}
  942. void TextureCacheBase::BindTextures(BitSet32 used_textures,
  943. const std::array<SamplerState, 8>& samplers)
  944. {
  945. auto& system = Core::System::GetInstance();
  946. auto& pixel_shader_manager = system.GetPixelShaderManager();
  947. for (u32 i = 0; i < m_bound_textures.size(); i++)
  948. {
  949. const RcTcacheEntry& tentry = m_bound_textures[i];
  950. if (used_textures[i] && tentry)
  951. {
  952. g_gfx->SetTexture(i, tentry->texture.get());
  953. pixel_shader_manager.SetTexDims(i, tentry->native_width, tentry->native_height);
  954. auto& state = samplers[i];
  955. g_gfx->SetSamplerState(i, state);
  956. pixel_shader_manager.SetSamplerState(i, state.tm0.hex, state.tm1.hex);
  957. }
  958. }
  959. TMEM::FinalizeBinds(used_textures);
  960. }
// Heuristic detector for "arbitrary" mipmaps: mip levels whose contents are
// not simple downscales of the base level (e.g. mips used for fog or distance
// effects). Each level is registered with AddLevel(), then
// HasArbitraryMipmaps() compares a box-downsampled version of each level
// against the actual next level and thresholds the average difference.
class ArbitraryMipmapDetector
{
private:
  using PixelRGBAf = std::array<float, 4>;
  using PixelRGBAu8 = std::array<u8, 4>;

public:
  explicit ArbitraryMipmapDetector() = default;

  // Registers one mip level. Only the pointer is stored, so `buffer` must stay
  // valid until HasArbitraryMipmaps() has been called.
  void AddLevel(u32 width, u32 height, u32 row_length, const u8* buffer)
  {
    levels.push_back({{width, height, row_length}, buffer});
  }

  // Returns true when the mip chain looks hand-authored. downsample_buffer is
  // caller-provided scratch space; two regions sized for level 1 are used as
  // ping-pong buffers while walking down the chain.
  bool HasArbitraryMipmaps(u8* downsample_buffer) const
  {
    if (levels.size() < 2)
      return false;

    if (!g_ActiveConfig.bArbitraryMipmapDetection)
      return false;

    // This is the average per-pixel, per-channel difference in percent between what we
    // expect a normal blurred mipmap to look like and what we actually received
    // 4.5% was chosen because it's just below the lowest clearly-arbitrary texture
    // I found in my tests, the background clouds in Mario Galaxy's Observatory lobby.
    const auto threshold = g_ActiveConfig.fArbitraryMipmapDetectionThreshold;

    auto* src = downsample_buffer;
    auto* dst = downsample_buffer + levels[1].shape.row_length * levels[1].shape.height * 4;

    float total_diff = 0.f;

    for (std::size_t i = 0; i < levels.size() - 1; ++i)
    {
      const auto& level = levels[i];
      const auto& mip = levels[i + 1];

      u64 level_pixel_count = level.shape.width;
      level_pixel_count *= level.shape.height;

      // AverageDiff stores the difference sum in a u64, so make sure we can't overflow
      ASSERT(level_pixel_count < (std::numeric_limits<u64>::max() / (255 * 255 * 4)));

      // Manually downsample the past downsample with a simple box blur
      // This is not necessarily close to whatever the original artists used, however
      // It should still be closer than a thing that's not a downscale at all
      Level::Downsample(i ? src : level.pixels, level.shape, dst, mip.shape);

      // Find the average difference between pixels in this level but downsampled
      // and the next level
      auto diff = mip.AverageDiff(dst);
      total_diff += diff;

      std::swap(src, dst);
    }

    auto all_levels = total_diff / (levels.size() - 1);

    return all_levels > threshold;
  }

private:
  struct Shape
  {
    u32 width;
    u32 height;
    u32 row_length;  // row pitch in pixels (see SampleLinear's addressing)
  };

  struct Level
  {
    Shape shape;
    const u8* pixels;  // non-owning pointer to the level's RGBA8 texel data

    // Reads one RGBA8 pixel at (x, y). No bounds checking is performed.
    static PixelRGBAu8 SampleLinear(const u8* src, const Shape& src_shape, u32 x, u32 y)
    {
      const auto* p = src + (x + y * src_shape.row_length) * 4;
      return {{p[0], p[1], p[2], p[3]}};
    }

    // Puts a downsampled image in dst. dst must be at least width*height*4
    static void Downsample(const u8* src, const Shape& src_shape, u8* dst, const Shape& dst_shape)
    {
      for (u32 i = 0; i < dst_shape.height; ++i)
      {
        for (u32 j = 0; j < dst_shape.width; ++j)
        {
          auto x = j * 2;
          auto y = i * 2;
          const std::array<PixelRGBAu8, 4> samples{{
              SampleLinear(src, src_shape, x, y),
              SampleLinear(src, src_shape, x + 1, y),
              SampleLinear(src, src_shape, x, y + 1),
              SampleLinear(src, src_shape, x + 1, y + 1),
          }};

          auto* dst_pixel = dst + (j + i * dst_shape.row_length) * 4;
          for (int channel = 0; channel < 4; channel++)
          {
            // Round-to-nearest average of the 2x2 box.
            uint32_t channel_value = samples[0][channel] + samples[1][channel] +
                                     samples[2][channel] + samples[3][channel];
            dst_pixel[channel] = (channel_value + 2) / 4;
          }
        }
      }
    }

    // Root-mean-square per-channel difference between this level's pixels and
    // `other` (same shape), scaled to a 0..100 percentage.
    float AverageDiff(const u8* other) const
    {
      // As textures are stored in (at most) 8 bit precision, each channel can
      // have a max diff of (2^8)^2, multiply by 4 channels = 2^18 per pixel.
      // That means to overflow, we must have a texture with more than 2^46
      // pixels - which is way beyond anything the original hardware could do,
      // and likely a sane assumption going forward for some significant time.
      u64 current_diff_sum = 0;

      const auto* ptr1 = pixels;
      const auto* ptr2 = other;
      for (u32 i = 0; i < shape.height; ++i)
      {
        const auto* row1 = ptr1;
        const auto* row2 = ptr2;
        for (u32 j = 0; j < shape.width; ++j, row1 += 4, row2 += 4)
        {
          int pixel_diff = 0;
          for (int channel = 0; channel < 4; channel++)
          {
            const int diff = static_cast<int>(row1[channel]) - static_cast<int>(row2[channel]);
            const int diff_squared = diff * diff;
            pixel_diff += diff_squared;
          }
          current_diff_sum += pixel_diff;
        }
        ptr1 += shape.row_length;
        ptr2 += shape.row_length;
      }

      // calculate the MSE over all pixels, divide by 2.56 to make it a percent
      // (IE scale to 0..100 instead of 0..256)
      return std::sqrt(static_cast<float>(current_diff_sum) / (shape.width * shape.height * 4)) /
             2.56f;
    }
  };
  std::vector<Level> levels;
};
  1084. TCacheEntry* TextureCacheBase::Load(const TextureInfo& texture_info)
  1085. {
  1086. if (auto entry = LoadImpl(texture_info, false))
  1087. {
  1088. if (!DidLinkedAssetsChange(*entry))
  1089. {
  1090. return entry;
  1091. }
  1092. InvalidateTexture(GetTexCacheIter(entry));
  1093. return LoadImpl(texture_info, true);
  1094. }
  1095. return nullptr;
  1096. }
// Backing implementation of Load(). Returns the cache entry for the texture
// stage described by texture_info, reusing the currently bound entry when the
// TMEM state allows it (unless force_reload is set), and binding the looked-up
// entry to the stage on success. Returns nullptr if no entry can be obtained.
TCacheEntry* TextureCacheBase::LoadImpl(const TextureInfo& texture_info, bool force_reload)
{
  // if this stage was not invalidated by changes to texture registers, keep the current texture
  if (!force_reload && TMEM::IsValid(texture_info.GetStage()) &&
      m_bound_textures[texture_info.GetStage()])
  {
    TCacheEntry* entry = m_bound_textures[texture_info.GetStage()].get();
    // If the TMEM configuration is such that this texture is more or less guaranteed to still
    // be in TMEM, then we know we can reuse the old entry without even hashing the memory
    //
    // It's possible this texture has already been overwritten in emulated memory and therefore
    // invalidated from our texture cache, but we want to use it anyway to approximate the
    // result of the game using an overwritten texture cached in TMEM.
    //
    // Spyro: A Hero's Tail is known for (deliberately?) using such overwritten textures
    // in its bloom effect, which breaks without giving it the invalidated texture.
    if (TMEM::IsCached(texture_info.GetStage()))
    {
      return entry;
    }

    // Otherwise, hash the backing memory and check it's unchanged.
    // FIXME: this doesn't correctly handle textures from tmem.
    if (!entry->invalidated && entry->base_hash == entry->CalculateHash())
    {
      return entry;
    }
  }

  auto entry = GetTexture(g_ActiveConfig.iSafeTextureCache_ColorSamples, texture_info);

  if (!entry)
    return nullptr;

  entry->frameCount = FRAMECOUNT_INVALID;

  // Give graphics-mod actions a chance to react to this texture load.
  if (entry->texture_info_name.empty() && g_ActiveConfig.bGraphicMods)
  {
    entry->texture_info_name = texture_info.CalculateTextureName().GetFullName();

    GraphicsModActionData::TextureLoad texture_load{entry->texture_info_name};
    for (const auto& action :
         g_graphics_mod_manager->GetTextureLoadActions(entry->texture_info_name))
    {
      action->OnTextureLoad(&texture_load);
    }
  }

  m_bound_textures[texture_info.GetStage()] = entry;

  // We need to keep track of invalidated textures until they have actually been replaced or
  // re-loaded
  TMEM::Bind(texture_info.GetStage(), entry->NumBlocksX(), entry->NumBlocksY(),
             entry->GetNumLevels() > 1, entry->format == TextureFormat::RGBA8);

  return entry.get();
}
  1145. RcTcacheEntry TextureCacheBase::GetTexture(const int textureCacheSafetyColorSampleSize,
  1146. const TextureInfo& texture_info)
  1147. {
  1148. if (!texture_info.IsDataValid())
  1149. return {};
  1150. // Hash assigned to texcache entry (also used to generate filenames used for texture dumping and
  1151. // custom texture lookup)
  1152. u64 base_hash = TEXHASH_INVALID;
  1153. u64 full_hash = TEXHASH_INVALID;
  1154. TextureAndTLUTFormat full_format(texture_info.GetTextureFormat(), texture_info.GetTlutFormat());
  1155. // Reject invalid tlut format.
  1156. if (texture_info.GetPaletteSize() && !IsValidTLUTFormat(texture_info.GetTlutFormat()))
  1157. return {};
  1158. u32 bytes_per_block = (texture_info.GetBlockWidth() * texture_info.GetBlockHeight() *
  1159. TexDecoder_GetTexelSizeInNibbles(texture_info.GetTextureFormat())) /
  1160. 2;
  1161. // TODO: the texture cache lookup is based on address, but a texture from tmem has no reason
  1162. // to have a unique and valid address. This could result in a regular texture and a tmem
  1163. // texture aliasing onto the same texture cache entry.
  1164. // If we are recording a FifoLog, keep track of what memory we read. FifoRecorder does
  1165. // its own memory modification tracking independent of the texture hashing below.
  1166. if (OpcodeDecoder::g_record_fifo_data && !texture_info.IsFromTmem())
  1167. {
  1168. Core::System::GetInstance().GetFifoRecorder().UseMemory(texture_info.GetRawAddress(),
  1169. texture_info.GetFullLevelSize(),
  1170. MemoryUpdate::Type::TextureMap);
  1171. }
  1172. // TODO: This doesn't hash GB tiles for preloaded RGBA8 textures (instead, it's hashing more data
  1173. // from the low tmem bank than it should)
  1174. base_hash = Common::GetHash64(texture_info.GetData(), texture_info.GetTextureSize(),
  1175. textureCacheSafetyColorSampleSize);
  1176. u32 palette_size = 0;
  1177. if (texture_info.GetPaletteSize())
  1178. {
  1179. palette_size = *texture_info.GetPaletteSize();
  1180. full_hash =
  1181. base_hash ^ Common::GetHash64(texture_info.GetTlutAddress(), *texture_info.GetPaletteSize(),
  1182. textureCacheSafetyColorSampleSize);
  1183. }
  1184. else
  1185. {
  1186. full_hash = base_hash;
  1187. }
  1188. // Search the texture cache for textures by address
  1189. //
  1190. // Find all texture cache entries for the current texture address, and decide whether to use one
  1191. // of them, or to create a new one
  1192. //
  1193. // In most cases, the fastest way is to use only one texture cache entry for the same address.
  1194. // Usually, when a texture changes, the old version of the texture is unlikely to be used again.
  1195. // If there were new cache entries created for normal texture updates, there would be a slowdown
  1196. // due to a huge amount of unused cache entries. Also thanks to texture pooling, overwriting an
  1197. // existing cache entry is faster than creating a new one from scratch.
  1198. //
  1199. // Some games use the same address for different textures though. If the same cache entry was used
  1200. // in this case, it would be constantly overwritten, and effectively there wouldn't be any caching
  1201. // for those textures. Examples for this are Metroid Prime and Castlevania 3. Metroid Prime has
  1202. // multiple sets of fonts on each other stored in a single texture and uses the palette to make
  1203. // different characters visible or invisible. In Castlevania 3 some textures are used for 2
  1204. // different things or at least in 2 different ways (size 1024x1024 vs 1024x256).
  1205. //
  1206. // To determine whether to use multiple cache entries or a single entry, use the following
  1207. // heuristic: If the same texture address is used several times during the same frame, assume the
  1208. // address is used for different purposes and allow creating an additional cache entry. If there's
  1209. // at least one entry that hasn't been used for the same frame, then overwrite it, in order to
  1210. // keep the cache as small as possible. If the current texture is found in the cache, use that
  1211. // entry.
  1212. //
  1213. // For efb copies, the entry created in CopyRenderTargetToTexture always has to be used, or else
  1214. // it was done in vain.
  1215. auto iter_range = m_textures_by_address.equal_range(texture_info.GetRawAddress());
  1216. TexAddrCache::iterator iter = iter_range.first;
  1217. TexAddrCache::iterator oldest_entry = iter;
  1218. int temp_frameCount = 0x7fffffff;
  1219. TexAddrCache::iterator unconverted_copy = m_textures_by_address.end();
  1220. TexAddrCache::iterator unreinterpreted_copy = m_textures_by_address.end();
  1221. while (iter != iter_range.second)
  1222. {
  1223. RcTcacheEntry& entry = iter->second;
  1224. // TODO: Some games (Rogue Squadron 3, Twin Snakes) seem to load a previously made XFB
  1225. // copy as a regular texture. You can see this particularly well in RS3 whenever the
  1226. // game freezes the image and fades it out to black on screen transitions, which fades
  1227. // out a purple screen in XFB2Tex. Check for this here and convert them if necessary.
  1228. // Do not load strided EFB copies, they are not meant to be used directly.
  1229. // Also do not directly load EFB copies, which were partly overwritten.
  1230. if (entry->IsEfbCopy() && entry->native_width == texture_info.GetRawWidth() &&
  1231. entry->native_height == texture_info.GetRawHeight() &&
  1232. entry->memory_stride == entry->BytesPerRow() && !entry->may_have_overlapping_textures)
  1233. {
  1234. // EFB copies have slightly different rules as EFB copy formats have different
  1235. // meanings from texture formats.
  1236. if ((base_hash == entry->hash &&
  1237. (!texture_info.GetPaletteSize() || g_Config.backend_info.bSupportsPaletteConversion)) ||
  1238. IsPlayingBackFifologWithBrokenEFBCopies)
  1239. {
  1240. // The texture format in VRAM must match the format that the copy was created with. Some
  1241. // formats are inherently compatible, as the channel and bit layout is identical (e.g.
  1242. // I8/C8). Others have the same number of bits per texel, and can be reinterpreted on the
  1243. // GPU (e.g. IA4 and I8 or RGB565 and RGBA5). The only known game which reinteprets texels
  1244. // in this manner is Spiderman Shattered Dimensions, where it creates a copy in B8 format,
  1245. // and sets it up as a IA4 texture.
  1246. if (!IsCompatibleTextureFormat(entry->format.texfmt, texture_info.GetTextureFormat()))
  1247. {
  1248. // Can we reinterpret this in VRAM?
  1249. if (CanReinterpretTextureOnGPU(entry->format.texfmt, texture_info.GetTextureFormat()))
  1250. {
  1251. // Delay the conversion until afterwards, it's possible this texture has already been
  1252. // converted.
  1253. unreinterpreted_copy = iter++;
  1254. continue;
  1255. }
  1256. else
  1257. {
  1258. // If the EFB copies are in a different format and are not reinterpretable, use the RAM
  1259. // copy.
  1260. ++iter;
  1261. continue;
  1262. }
  1263. }
  1264. else
  1265. {
  1266. // Prefer the already-converted copy.
  1267. unconverted_copy = m_textures_by_address.end();
  1268. }
  1269. // TODO: We should check width/height/levels for EFB copies. I'm not sure what effect
  1270. // checking width/height/levels would have.
  1271. if (!texture_info.GetPaletteSize() || !g_Config.backend_info.bSupportsPaletteConversion)
  1272. return entry;
  1273. // Note that we found an unconverted EFB copy, then continue. We'll
  1274. // perform the conversion later. Currently, we only convert EFB copies to
  1275. // palette textures; we could do other conversions if it proved to be
  1276. // beneficial.
  1277. unconverted_copy = iter;
  1278. }
  1279. else
  1280. {
  1281. // Aggressively prune EFB copies: if it isn't useful here, it will probably
  1282. // never be useful again. It's theoretically possible for a game to do
  1283. // something weird where the copy could become useful in the future, but in
  1284. // practice it doesn't happen.
  1285. iter = InvalidateTexture(iter);
  1286. continue;
  1287. }
  1288. }
  1289. else
  1290. {
  1291. // For normal textures, all texture parameters need to match
  1292. if (!entry->IsEfbCopy() && entry->hash == full_hash && entry->format == full_format &&
  1293. entry->native_levels >= texture_info.GetLevelCount() &&
  1294. entry->native_width == texture_info.GetRawWidth() &&
  1295. entry->native_height == texture_info.GetRawHeight())
  1296. {
  1297. entry = DoPartialTextureUpdates(iter->second, texture_info.GetTlutAddress(),
  1298. texture_info.GetTlutFormat());
  1299. if (entry)
  1300. {
  1301. entry->texture->FinishedRendering();
  1302. return entry;
  1303. }
  1304. }
  1305. }
  1306. // Find the texture which hasn't been used for the longest time. Count paletted
  1307. // textures as the same texture here, when the texture itself is the same. This
  1308. // improves the performance a lot in some games that use paletted textures.
  1309. // Example: Sonic the Fighters (inside Sonic Gems Collection)
  1310. // Skip EFB copies here, so they can be used for partial texture updates
  1311. // Also skip XFB copies, we might need to still scan them out
  1312. // or load them as regular textures later.
  1313. if (entry->frameCount != FRAMECOUNT_INVALID && entry->frameCount < temp_frameCount &&
  1314. !entry->IsCopy() && !(texture_info.GetPaletteSize() && entry->base_hash == base_hash))
  1315. {
  1316. temp_frameCount = entry->frameCount;
  1317. oldest_entry = iter;
  1318. }
  1319. ++iter;
  1320. }
  1321. if (unreinterpreted_copy != m_textures_by_address.end())
  1322. {
  1323. auto decoded_entry =
  1324. ReinterpretEntry(unreinterpreted_copy->second, texture_info.GetTextureFormat());
  1325. // It's possible to combine reinterpreted textures + palettes.
  1326. if (unreinterpreted_copy == unconverted_copy && decoded_entry)
  1327. decoded_entry = ApplyPaletteToEntry(decoded_entry, texture_info.GetTlutAddress(),
  1328. texture_info.GetTlutFormat());
  1329. if (decoded_entry)
  1330. return decoded_entry;
  1331. }
  1332. if (unconverted_copy != m_textures_by_address.end())
  1333. {
  1334. auto decoded_entry = ApplyPaletteToEntry(
  1335. unconverted_copy->second, texture_info.GetTlutAddress(), texture_info.GetTlutFormat());
  1336. if (decoded_entry)
  1337. {
  1338. return decoded_entry;
  1339. }
  1340. }
  1341. // Search the texture cache for normal textures by hash
  1342. //
  1343. // If the texture was fully hashed, the address does not need to match. Identical duplicate
  1344. // textures cause unnecessary slowdowns
  1345. // Example: Tales of Symphonia (GC) uses over 500 small textures in menus, but only around 70
  1346. // different ones
  1347. if (textureCacheSafetyColorSampleSize == 0 ||
  1348. std::max(texture_info.GetTextureSize(), palette_size) <=
  1349. (u32)textureCacheSafetyColorSampleSize * 8)
  1350. {
  1351. auto hash_range = m_textures_by_hash.equal_range(full_hash);
  1352. TexHashCache::iterator hash_iter = hash_range.first;
  1353. while (hash_iter != hash_range.second)
  1354. {
  1355. RcTcacheEntry& entry = hash_iter->second;
  1356. // All parameters, except the address, need to match here
  1357. if (entry->format == full_format && entry->native_levels >= texture_info.GetLevelCount() &&
  1358. entry->native_width == texture_info.GetRawWidth() &&
  1359. entry->native_height == texture_info.GetRawHeight())
  1360. {
  1361. entry = DoPartialTextureUpdates(hash_iter->second, texture_info.GetTlutAddress(),
  1362. texture_info.GetTlutFormat());
  1363. if (entry)
  1364. {
  1365. entry->texture->FinishedRendering();
  1366. return entry;
  1367. }
  1368. }
  1369. ++hash_iter;
  1370. }
  1371. }
  1372. // If at least one entry was not used for the same frame, overwrite the oldest one
  1373. if (temp_frameCount != 0x7fffffff)
  1374. {
  1375. // pool this texture and make a new one later
  1376. InvalidateTexture(oldest_entry);
  1377. }
  1378. std::vector<VideoCommon::CachedAsset<VideoCommon::GameTextureAsset>> cached_game_assets;
  1379. std::vector<std::shared_ptr<VideoCommon::TextureData>> data_for_assets;
  1380. bool has_arbitrary_mipmaps = false;
  1381. bool skip_texture_dump = false;
  1382. std::shared_ptr<HiresTexture> hires_texture;
  1383. if (g_ActiveConfig.bHiresTextures)
  1384. {
  1385. hires_texture = HiresTexture::Search(texture_info);
  1386. if (hires_texture)
  1387. {
  1388. auto asset = hires_texture->GetAsset();
  1389. const auto loaded_time = asset->GetLastLoadedTime();
  1390. cached_game_assets.push_back(
  1391. VideoCommon::CachedAsset<VideoCommon::GameTextureAsset>{std::move(asset), loaded_time});
  1392. has_arbitrary_mipmaps = hires_texture->HasArbitraryMipmaps();
  1393. skip_texture_dump = true;
  1394. }
  1395. }
  1396. std::vector<VideoCommon::CachedAsset<VideoCommon::CustomAsset>> additional_dependencies;
  1397. std::string texture_name = "";
  1398. if (g_ActiveConfig.bGraphicMods)
  1399. {
  1400. u32 height = texture_info.GetRawHeight();
  1401. u32 width = texture_info.GetRawWidth();
  1402. if (hires_texture)
  1403. {
  1404. auto asset = hires_texture->GetAsset();
  1405. if (asset)
  1406. {
  1407. auto data = asset->GetData();
  1408. if (data)
  1409. {
  1410. if (!data->m_texture.m_slices.empty())
  1411. {
  1412. if (!data->m_texture.m_slices[0].m_levels.empty())
  1413. {
  1414. height = data->m_texture.m_slices[0].m_levels[0].height;
  1415. width = data->m_texture.m_slices[0].m_levels[0].width;
  1416. }
  1417. }
  1418. }
  1419. }
  1420. }
  1421. texture_name = texture_info.CalculateTextureName().GetFullName();
  1422. GraphicsModActionData::TextureCreate texture_create{
  1423. texture_name, width, height, &cached_game_assets, &additional_dependencies};
  1424. for (const auto& action : g_graphics_mod_manager->GetTextureCreateActions(texture_name))
  1425. {
  1426. action->OnTextureCreate(&texture_create);
  1427. }
  1428. }
  1429. data_for_assets.reserve(cached_game_assets.size());
  1430. for (auto& cached_asset : cached_game_assets)
  1431. {
  1432. auto data = cached_asset.m_asset->GetData();
  1433. if (data)
  1434. {
  1435. if (cached_asset.m_asset->Validate(texture_info.GetRawWidth(), texture_info.GetRawHeight()))
  1436. {
  1437. data_for_assets.push_back(data);
  1438. }
  1439. }
  1440. }
  1441. auto entry =
  1442. CreateTextureEntry(TextureCreationInfo{base_hash, full_hash, bytes_per_block, palette_size},
  1443. texture_info, textureCacheSafetyColorSampleSize,
  1444. std::move(data_for_assets), has_arbitrary_mipmaps, skip_texture_dump);
  1445. entry->linked_game_texture_assets = std::move(cached_game_assets);
  1446. entry->linked_asset_dependencies = std::move(additional_dependencies);
  1447. entry->texture_info_name = std::move(texture_name);
  1448. return entry;
  1449. }
// Note: the following function assumes all CustomTextureData has a single slice. This is verified
// with the 'GameTexture::Validate' function after the data is loaded. Only a single slice is
// expected because each texture is loaded into a texture array
//
// Creates, populates and registers a new cache entry for the texture described by texture_info.
// If assets_data is non-empty, the entry is built from custom (hi-res / graphics-mod) asset data,
// one array layer per asset. Otherwise the guest texture is decoded — on the GPU when supported,
// falling back to CPU decoding via TexDecoder — and optionally dumped to disk. The entry is
// inserted into m_textures_by_address, and into m_textures_by_hash when the full hash covers the
// whole texture (see the safety_color_sample_size check below).
RcTcacheEntry TextureCacheBase::CreateTextureEntry(
    const TextureCreationInfo& creation_info, const TextureInfo& texture_info,
    const int safety_color_sample_size,
    std::vector<std::shared_ptr<VideoCommon::TextureData>> assets_data,
    const bool custom_arbitrary_mipmaps, bool skip_texture_dump)
{
#ifdef __APPLE__
  // macOS-only option: force single-level textures when mipmapping is disabled in the config.
  const bool no_mips = g_ActiveConfig.bNoMipmapping;
#else
  const bool no_mips = false;
#endif

  RcTcacheEntry entry;
  if (!assets_data.empty())
  {
    // Custom-asset path: size the texture from the first asset's top level, with enough mip
    // levels for the deepest mip chain among all assets (each asset becomes one array layer).
    const auto calculate_max_levels = [&]() {
      const auto max_element = std::max_element(
          assets_data.begin(), assets_data.end(), [](const auto& lhs, const auto& rhs) {
            return lhs->m_texture.m_slices[0].m_levels.size() <
                   rhs->m_texture.m_slices[0].m_levels.size();
          });
      return (*max_element)->m_texture.m_slices[0].m_levels.size();
    };
    const u32 texLevels = no_mips ? 1 : (u32)calculate_max_levels();
    const auto& first_level = assets_data[0]->m_texture.m_slices[0].m_levels[0];
    const TextureConfig config(first_level.width, first_level.height, texLevels,
                               static_cast<u32>(assets_data.size()), 1, first_level.format, 0,
                               AbstractTextureType::Texture_2DArray);
    entry = AllocateCacheEntry(config);
    if (!entry) [[unlikely]]
      return entry;

    // Upload every available mip level of every asset. Assets with fewer levels than texLevels
    // simply leave their remaining levels unwritten.
    for (u32 data_index = 0; data_index < static_cast<u32>(assets_data.size()); data_index++)
    {
      const auto& asset = assets_data[data_index];
      const auto& slice = asset->m_texture.m_slices[0];
      for (u32 level_index = 0;
           level_index < std::min(texLevels, static_cast<u32>(slice.m_levels.size()));
           ++level_index)
      {
        const auto& level = slice.m_levels[level_index];
        entry->texture->Load(level_index, level.width, level.height, level.row_length,
                             level.data.data(), level.data.size(), data_index);
      }
    }

    entry->has_arbitrary_mips = custom_arbitrary_mipmaps;
    entry->is_custom_tex = true;
  }
  else
  {
    const u32 texLevels = no_mips ? 1 : texture_info.GetLevelCount();
    const u32 expanded_width = texture_info.GetExpandedWidth();
    const u32 expanded_height = texture_info.GetExpandedHeight();
    const u32 width = texture_info.GetRawWidth();
    const u32 height = texture_info.GetRawHeight();

    const TextureConfig config(width, height, texLevels, 1, 1, AbstractTextureFormat::RGBA8, 0,
                               AbstractTextureType::Texture_2DArray);
    entry = AllocateCacheEntry(config);
    if (!entry) [[unlikely]]
      return entry;

    // We can decode on the GPU if it is a supported format and the flag is enabled.
    // Currently we don't decode RGBA8 textures from TMEM, as that would require copying from both
    // banks, and if we're doing an copy we may as well just do the whole thing on the CPU, since
    // there's no conversion between formats. In the future this could be extended with a separate
    // shader, however.
    const bool decode_on_gpu =
        g_ActiveConfig.UseGPUTextureDecoding() &&
        !(texture_info.IsFromTmem() && texture_info.GetTextureFormat() == TextureFormat::RGBA8);

    ArbitraryMipmapDetector arbitrary_mip_detector;

    // Initialized to null because only software loading uses this buffer
    u8* dst_buffer = nullptr;

    // Base level: try the GPU decode path first; on failure (or when disabled), decode to m_temp.
    if (!decode_on_gpu ||
        !DecodeTextureOnGPU(
            entry, 0, texture_info.GetData(), texture_info.GetTextureSize(),
            texture_info.GetTextureFormat(), width, height, expanded_width, expanded_height,
            creation_info.bytes_per_block * (expanded_width / texture_info.GetBlockWidth()),
            texture_info.GetTlutAddress(), texture_info.GetTlutFormat()))
    {
      size_t decoded_texture_size = expanded_width * sizeof(u32) * expanded_height;

      // Allocate memory for all levels at once
      size_t total_texture_size = decoded_texture_size;

      // For the downsample, we need 2 buffers; 1 is 1/4 of the original texture, the other 1/16
      size_t mip_downsample_buffer_size = decoded_texture_size * 5 / 16;

      // Each successive mip level is a quarter of the previous one's size.
      size_t prev_level_size = decoded_texture_size;
      for (u32 i = 1; i < texture_info.GetLevelCount(); ++i)
      {
        prev_level_size /= 4;
        total_texture_size += prev_level_size;
      }

      // Add space for the downsampling at the end
      total_texture_size += mip_downsample_buffer_size;

      CheckTempSize(total_texture_size);
      dst_buffer = m_temp;
      if (!(texture_info.GetTextureFormat() == TextureFormat::RGBA8 && texture_info.IsFromTmem()))
      {
        TexDecoder_Decode(dst_buffer, texture_info.GetData(), expanded_width, expanded_height,
                          texture_info.GetTextureFormat(), texture_info.GetTlutAddress(),
                          texture_info.GetTlutFormat());
      }
      else
      {
        // RGBA8-from-TMEM uses a dedicated decoder that reads the second (odd) TMEM bank as well.
        TexDecoder_DecodeRGBA8FromTmem(dst_buffer, texture_info.GetData(),
                                       texture_info.GetTmemOddAddress(), expanded_width,
                                       expanded_height);
      }

      entry->texture->Load(0, width, height, expanded_width, dst_buffer, decoded_texture_size);

      arbitrary_mip_detector.AddLevel(width, height, expanded_width, dst_buffer);

      // Advance into the preallocated buffer; mip levels are decoded after the base level.
      dst_buffer += decoded_texture_size;
    }

    // Decode the remaining mip levels. GetMipMapLevel(level - 1) returns level 'level' data;
    // missing levels are skipped.
    for (u32 level = 1; level != texLevels; ++level)
    {
      auto mip_level = texture_info.GetMipMapLevel(level - 1);
      if (!mip_level)
        continue;

      if (!decode_on_gpu ||
          !DecodeTextureOnGPU(entry, level, mip_level->GetData(), mip_level->GetTextureSize(),
                              texture_info.GetTextureFormat(), mip_level->GetRawWidth(),
                              mip_level->GetRawHeight(), mip_level->GetExpandedWidth(),
                              mip_level->GetExpandedHeight(),
                              creation_info.bytes_per_block *
                                  (mip_level->GetExpandedWidth() / texture_info.GetBlockWidth()),
                              texture_info.GetTlutAddress(), texture_info.GetTlutFormat()))
      {
        // No need to call CheckTempSize here, as the whole buffer is preallocated at the beginning
        const u32 decoded_mip_size =
            mip_level->GetExpandedWidth() * sizeof(u32) * mip_level->GetExpandedHeight();
        TexDecoder_Decode(dst_buffer, mip_level->GetData(), mip_level->GetExpandedWidth(),
                          mip_level->GetExpandedHeight(), texture_info.GetTextureFormat(),
                          texture_info.GetTlutAddress(), texture_info.GetTlutFormat());
        entry->texture->Load(level, mip_level->GetRawWidth(), mip_level->GetRawHeight(),
                             mip_level->GetExpandedWidth(), dst_buffer, decoded_mip_size);

        arbitrary_mip_detector.AddLevel(mip_level->GetRawWidth(), mip_level->GetRawHeight(),
                                        mip_level->GetExpandedWidth(), dst_buffer);

        dst_buffer += decoded_mip_size;
      }
    }

    // NOTE(review): dst_buffer points past the last software-decoded level here (or is null when
    // everything decoded on the GPU); the detector presumably uses it as scratch space — confirm.
    entry->has_arbitrary_mips = arbitrary_mip_detector.HasArbitraryMipmaps(dst_buffer);

    if (g_ActiveConfig.bDumpTextures && !skip_texture_dump && texLevels > 0)
    {
      const std::string basename = texture_info.CalculateTextureName().GetFullName();
      if (g_ActiveConfig.bDumpBaseTextures)
      {
        m_texture_dumper.DumpTexture(*entry->texture, basename, 0, entry->has_arbitrary_mips);
      }
      if (g_ActiveConfig.bDumpMipmapTextures)
      {
        for (u32 level = 1; level < texLevels; ++level)
        {
          m_texture_dumper.DumpTexture(*entry->texture, basename, level, entry->has_arbitrary_mips);
        }
      }
    }
  }

  const auto iter = m_textures_by_address.emplace(texture_info.GetRawAddress(), entry);
  // Only index by hash when the full hash covered the whole texture (sample size disabled, or the
  // texture/palette small enough to have been hashed completely).
  if (safety_color_sample_size == 0 ||
      std::max(texture_info.GetTextureSize(), creation_info.palette_size) <=
          (u32)safety_color_sample_size * 8)
  {
    entry->textures_by_hash_iter = m_textures_by_hash.emplace(creation_info.full_hash, entry);
  }

  const TextureAndTLUTFormat full_format(texture_info.GetTextureFormat(),
                                         texture_info.GetTlutFormat());
  entry->SetGeneralParameters(texture_info.GetRawAddress(), texture_info.GetTextureSize(),
                              full_format, false);
  entry->SetDimensions(texture_info.GetRawWidth(), texture_info.GetRawHeight(),
                       texture_info.GetLevelCount());
  entry->SetHashes(creation_info.base_hash, creation_info.full_hash);
  entry->memory_stride = entry->BytesPerRow();
  entry->SetNotCopy();

  INCSTAT(g_stats.num_textures_uploaded);
  SETSTAT(g_stats.num_textures_alive, static_cast<int>(m_textures_by_address.size()));

  entry = DoPartialTextureUpdates(iter->second, texture_info.GetTlutAddress(),
                                  texture_info.GetTlutFormat());

  // This should only be needed if the texture was updated, or used GPU decoding.
  entry->texture->FinishedRendering();
  return entry;
}
  1628. static void GetDisplayRectForXFBEntry(TCacheEntry* entry, u32 width, u32 height,
  1629. MathUtil::Rectangle<int>* display_rect)
  1630. {
  1631. // Scale the sub-rectangle to the full resolution of the texture.
  1632. display_rect->left = 0;
  1633. display_rect->top = 0;
  1634. display_rect->right = static_cast<int>(width * entry->GetWidth() / entry->native_width);
  1635. display_rect->bottom = static_cast<int>(height * entry->GetHeight() / entry->native_height);
  1636. }
// Returns a texture containing the XFB at 'address' (width x height pixels, 'stride' bytes per
// row). Reuses a cached, unmodified copy when one exists; otherwise decodes the XFB from guest
// RAM into a fresh render-target entry, stitches in any overlapping VRAM copies, and caches it.
// On failure (unmapped guest address) returns an empty entry. display_rect receives the
// sub-rectangle of the returned texture that corresponds to the requested region.
RcTcacheEntry TextureCacheBase::GetXFBTexture(u32 address, u32 width, u32 height, u32 stride,
                                              MathUtil::Rectangle<int>* display_rect)
{
  // Compute total texture size. XFB textures aren't tiled, so this is simple.
  const u32 total_size = height * stride;
  auto& system = Core::System::GetInstance();
  auto& memory = system.GetMemory();
  const u8* src_data = memory.GetPointerForRange(address, total_size);
  if (!src_data)
  {
    ERROR_LOG_FMT(VIDEO, "Trying to load XFB texture from invalid address {:#010x}", address);
    return {};
  }

  // Do we currently have a mutable version of this XFB copy in VRAM?
  RcTcacheEntry entry = GetXFBFromCache(address, width, height, stride);
  if (entry && !entry->IsLocked())
  {
    if (entry->is_xfb_container)
    {
      // Containers may have newer partial VRAM copies to merge in before display.
      StitchXFBCopy(entry);
      entry->texture->FinishedRendering();
    }
    GetDisplayRectForXFBEntry(entry.get(), width, height, display_rect);
    return entry;
  }

  // Create a new VRAM texture, and fill it with the data from guest RAM.
  entry = AllocateCacheEntry(TextureConfig(width, height, 1, 1, 1, AbstractTextureFormat::RGBA8,
                                           AbstractTextureFlag_RenderTarget,
                                           AbstractTextureType::Texture_2DArray));

  entry->SetGeneralParameters(address, total_size,
                              TextureAndTLUTFormat(TextureFormat::XFB, TLUTFormat::IA8), true);
  entry->SetDimensions(width, height, 1);
  entry->SetXfbCopy(stride);

  const u64 hash = entry->CalculateHash();
  entry->SetHashes(hash, hash);
  entry->is_xfb_container = true;
  entry->is_custom_tex = false;
  entry->may_have_overlapping_textures = false;
  // FRAMECOUNT_INVALID keeps this entry from being picked as an eviction candidate.
  entry->frameCount = FRAMECOUNT_INVALID;

  // Prefer GPU decoding; fall back to CPU XFB decoding into the temp buffer.
  if (!g_ActiveConfig.UseGPUTextureDecoding() ||
      !DecodeTextureOnGPU(entry, 0, src_data, total_size, entry->format.texfmt, width, height,
                          width, height, stride, s_tex_mem.data(), entry->format.tlutfmt))
  {
    const u32 decoded_size = width * height * sizeof(u32);
    CheckTempSize(decoded_size);
    TexDecoder_DecodeXFB(m_temp, src_data, width, height, stride);
    entry->texture->Load(0, width, height, width, m_temp, decoded_size);
  }

  // Stitch any VRAM copies into the new RAM copy.
  StitchXFBCopy(entry);
  entry->texture->FinishedRendering();

  // Insert into the texture cache so we can re-use it next frame, if needed.
  m_textures_by_address.emplace(entry->addr, entry);
  SETSTAT(g_stats.num_textures_alive, static_cast<int>(m_textures_by_address.size()));
  INCSTAT(g_stats.num_textures_uploaded);

  // Optional debugging/modding support: name the entry for graphics mods, dump it to disk.
  if (g_ActiveConfig.bDumpXFBTarget || g_ActiveConfig.bGraphicMods)
  {
    const std::string id = fmt::format("{}x{}", width, height);
    if (g_ActiveConfig.bGraphicMods)
    {
      entry->texture_info_name = fmt::format("{}_{}", XFB_DUMP_PREFIX, id);
    }

    if (g_ActiveConfig.bDumpXFBTarget)
    {
      entry->texture->Save(fmt::format("{}{}_n{:06}_{}.png", File::GetUserPath(D_DUMPTEXTURES_IDX),
                                       XFB_DUMP_PREFIX, xfb_count++, id),
                           0);
    }
  }

  GetDisplayRectForXFBEntry(entry.get(), width, height, display_rect);
  return entry;
}
  1709. RcTcacheEntry TextureCacheBase::GetXFBFromCache(u32 address, u32 width, u32 height, u32 stride)
  1710. {
  1711. auto iter_range = m_textures_by_address.equal_range(address);
  1712. TexAddrCache::iterator iter = iter_range.first;
  1713. while (iter != iter_range.second)
  1714. {
  1715. auto& entry = iter->second;
  1716. // The only thing which has to match exactly is the stride. We can use a partial rectangle if
  1717. // the VI width/height differs from that of the XFB copy.
  1718. if (entry->is_xfb_copy && entry->memory_stride == stride && entry->native_width >= width &&
  1719. entry->native_height >= height && !entry->may_have_overlapping_textures)
  1720. {
  1721. if (entry->hash == entry->CalculateHash() && !entry->reference_changed)
  1722. {
  1723. return entry;
  1724. }
  1725. else
  1726. {
  1727. // At this point, we either have an xfb copy that has changed its hash
  1728. // or an xfb created by stitching or from memory that has been changed
  1729. // we are safe to invalidate this
  1730. iter = InvalidateTexture(iter);
  1731. continue;
  1732. }
  1733. }
  1734. ++iter;
  1735. }
  1736. return {};
  1737. }
// Merges every overlapping, still-valid VRAM copy into stitched_entry, so the final texture
// reflects all partial EFB/XFB copies the game performed over this memory range.
void TextureCacheBase::StitchXFBCopy(RcTcacheEntry& stitched_entry)
{
  // It is possible that some of the overlapping textures overlap each other. This behavior has been
  // seen with XFB copies in Rogue Leader. To get the correct result, we apply the texture updates
  // in the order the textures were originally loaded. This ensures that the parts of the texture
  // that would have been overwritten in memory on real hardware get overwritten the same way here
  // too. This should work, but it may be a better idea to keep track of partial XFB copy
  // invalidations instead, which would reduce the amount of copying work here.
  std::vector<TCacheEntry*> candidates;
  bool create_upscaled_copy = false;

  auto iter = FindOverlappingTextures(stitched_entry->addr, stitched_entry->size_in_bytes);
  while (iter.first != iter.second)
  {
    // Currently, this checks the stride of the VRAM copy against the VI request. Therefore, for
    // interlaced modes, VRAM copies won't be considered candidates. This is okay for now, because
    // our force progressive hack means that an XFB copy should always have a matching stride. If
    // the hack is disabled, XFB2RAM should also be enabled. Should we wish to implement interlaced
    // stitching in the future, this would require a shader which grabs every second line.
    auto& entry = iter.first->second;
    if (entry != stitched_entry && entry->IsCopy() &&
        entry->OverlapsMemoryRange(stitched_entry->addr, stitched_entry->size_in_bytes) &&
        entry->memory_stride == stitched_entry->memory_stride)
    {
      if (entry->hash == entry->CalculateHash())
      {
        // Can't check the height here because of Y scaling.
        if (entry->native_width != entry->GetWidth())
          create_upscaled_copy = true;

        candidates.emplace_back(entry.get());
      }
      else
      {
        // If the hash does not match, this EFB copy will not be used for anything, so remove it
        iter.first = InvalidateTexture(iter.first);
        continue;
      }
    }
    ++iter.first;
  }

  if (candidates.empty())
    return;

  // Apply candidates in creation (id) order, so later copies overwrite earlier ones — matching
  // the order the writes hit memory on real hardware.
  std::sort(candidates.begin(), candidates.end(),
            [](const TCacheEntry* a, const TCacheEntry* b) { return a->id < b->id; });

  // We only upscale when necessary to preserve resolution. i.e. when there are upscaled partial
  // copies to be stitched together.
  if (create_upscaled_copy)
  {
    ScaleTextureCacheEntryTo(stitched_entry,
                             g_framebuffer_manager->EFBToScaledX(stitched_entry->native_width),
                             g_framebuffer_manager->EFBToScaledY(stitched_entry->native_height));
  }

  for (TCacheEntry* entry : candidates)
  {
    int src_x, src_y, dst_x, dst_y;

    // Convert the byte offset between the two copies into pixel coordinates.
    // NOTE(review): the "/ 2" assumes 2 bytes per pixel — presumably the XFB's packed format;
    // confirm against the copy formats that can reach this path.
    if (entry->addr >= stitched_entry->addr)
    {
      int pixel_offset = (entry->addr - stitched_entry->addr) / 2;
      src_x = 0;
      src_y = 0;
      dst_x = pixel_offset % stitched_entry->native_width;
      dst_y = pixel_offset / stitched_entry->native_width;
    }
    else
    {
      int pixel_offset = (stitched_entry->addr - entry->addr) / 2;
      src_x = pixel_offset % entry->native_width;
      src_y = pixel_offset / entry->native_width;
      dst_x = 0;
      dst_y = 0;
    }

    // Clamp the copied region to what both textures actually cover (in native coordinates).
    const int native_width =
        std::min(entry->native_width - src_x, stitched_entry->native_width - dst_x);
    const int native_height =
        std::min(entry->native_height - src_y, stitched_entry->native_height - dst_y);
    int src_width = native_width;
    int src_height = native_height;
    int dst_width = native_width;
    int dst_height = native_height;

    // Scale to internal resolution.
    if (entry->native_width != entry->GetWidth())
    {
      src_x = g_framebuffer_manager->EFBToScaledX(src_x);
      src_y = g_framebuffer_manager->EFBToScaledY(src_y);
      src_width = g_framebuffer_manager->EFBToScaledX(src_width);
      src_height = g_framebuffer_manager->EFBToScaledY(src_height);
    }
    if (create_upscaled_copy)
    {
      dst_x = g_framebuffer_manager->EFBToScaledX(dst_x);
      dst_y = g_framebuffer_manager->EFBToScaledY(dst_y);
      dst_width = g_framebuffer_manager->EFBToScaledX(dst_width);
      dst_height = g_framebuffer_manager->EFBToScaledY(dst_height);
    }

    // If the source rectangle is outside of what we actually have in VRAM, skip the copy.
    // The backend doesn't do any clamping, so if we don't, we'd pass out-of-range coordinates
    // to the graphics driver, which can cause GPU resets.
    if (static_cast<u32>(src_x + src_width) > entry->GetWidth() ||
        static_cast<u32>(src_y + src_height) > entry->GetHeight() ||
        static_cast<u32>(dst_x + dst_width) > stitched_entry->GetWidth() ||
        static_cast<u32>(dst_y + dst_height) > stitched_entry->GetHeight())
    {
      continue;
    }

    MathUtil::Rectangle<int> srcrect, dstrect;
    srcrect.left = src_x;
    srcrect.top = src_y;
    srcrect.right = (src_x + src_width);
    srcrect.bottom = (src_y + src_height);
    dstrect.left = dst_x;
    dstrect.top = dst_y;
    dstrect.right = (dst_x + dst_width);
    dstrect.bottom = (dst_y + dst_height);

    // We may have to scale if one of the copies is not internal resolution.
    if (srcrect.GetWidth() != dstrect.GetWidth() || srcrect.GetHeight() != dstrect.GetHeight())
    {
      g_gfx->ScaleTexture(stitched_entry->framebuffer.get(), dstrect, entry->texture.get(),
                          srcrect);
    }
    else
    {
      // If one copy is stereo, and the other isn't... not much we can do here :/
      const u32 layers_to_copy = std::min(entry->GetNumLayers(), stitched_entry->GetNumLayers());
      for (u32 layer = 0; layer < layers_to_copy; layer++)
      {
        stitched_entry->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer, 0,
                                                          dstrect, layer, 0);
      }
    }

    // Link the two textures together, so we won't apply this partial update again
    entry->CreateReference(stitched_entry.get());

    // Mark the texture update as used, as if it was loaded directly
    entry->frameCount = FRAMECOUNT_INVALID;
  }
}
  1872. std::array<u32, 3>
  1873. TextureCacheBase::GetRAMCopyFilterCoefficients(const CopyFilterCoefficients::Values& coefficients)
  1874. {
  1875. // To simplify the backend, we precalculate the three coefficients in common. Coefficients 0, 1
  1876. // are for the row above, 2, 3, 4 are for the current pixel, and 5, 6 are for the row below.
  1877. return {
  1878. static_cast<u32>(coefficients[0]) + static_cast<u32>(coefficients[1]),
  1879. static_cast<u32>(coefficients[2]) + static_cast<u32>(coefficients[3]) +
  1880. static_cast<u32>(coefficients[4]),
  1881. static_cast<u32>(coefficients[5]) + static_cast<u32>(coefficients[6]),
  1882. };
  1883. }
  1884. std::array<u32, 3>
  1885. TextureCacheBase::GetVRAMCopyFilterCoefficients(const CopyFilterCoefficients::Values& coefficients)
  1886. {
  1887. // If the user disables the copy filter, only apply it to the VRAM copy.
  1888. // This way games which are sensitive to changes to the RAM copy of the XFB will be unaffected.
  1889. std::array<u32, 3> res = GetRAMCopyFilterCoefficients(coefficients);
  1890. if (!g_ActiveConfig.bDisableCopyFilter)
  1891. return res;
  1892. // Disabling the copy filter in options should not ignore the values the game sets completely,
  1893. // as some games use the filter coefficients to control the brightness of the screen. Instead,
  1894. // add all coefficients to the middle sample, so the deflicker/vertical filter has no effect.
  1895. res[1] = res[0] + res[1] + res[2];
  1896. res[0] = 0;
  1897. res[2] = 0;
  1898. return res;
  1899. }
  1900. bool TextureCacheBase::AllCopyFilterCoefsNeeded(const std::array<u32, 3>& coefficients)
  1901. {
  1902. // If the top/bottom coefficients are zero, no point sampling/blending from these rows.
  1903. return coefficients[0] != 0 || coefficients[2] != 0;
  1904. }
  1905. bool TextureCacheBase::CopyFilterCanOverflow(const std::array<u32, 3>& coefficients)
  1906. {
  1907. // Normally, the copy filter coefficients will sum to at most 64. If the sum is higher than that,
  1908. // colors are clamped to the range [0, 255], but if the sum is higher than 128, that clamping
  1909. // breaks (as colors end up >= 512, which wraps back to 0).
  1910. return coefficients[0] + coefficients[1] + coefficients[2] >= 128;
  1911. }
  1912. void TextureCacheBase::CopyRenderTargetToTexture(
  1913. u32 dstAddr, EFBCopyFormat dstFormat, u32 width, u32 height, u32 dstStride, bool is_depth_copy,
  1914. const MathUtil::Rectangle<int>& srcRect, bool isIntensity, bool scaleByHalf, float y_scale,
  1915. float gamma, bool clamp_top, bool clamp_bottom,
  1916. const CopyFilterCoefficients::Values& filter_coefficients)
  1917. {
  1918. // Emulation methods:
  1919. //
  1920. // - EFB to RAM:
  1921. // Encodes the requested EFB data at its native resolution to the emulated RAM using shaders.
  1922. // Load() decodes the data from there again (using TextureDecoder) if the EFB copy is being
  1923. // used as a texture again.
  1924. // Advantage: CPU can read data from the EFB copy and we don't lose any important updates to
  1925. // the texture
  1926. // Disadvantage: Encoding+decoding steps often are redundant because only some games read or
  1927. // modify EFB copies before using them as textures.
  1928. //
  1929. // - EFB to texture:
  1930. // Copies the requested EFB data to a texture object in VRAM, performing any color conversion
  1931. // using shaders.
  1932. // Advantage: Works for many games, since in most cases EFB copies aren't read or modified at
  1933. // all before being used as a texture again.
  1934. // Since we don't do any further encoding or decoding here, this method is much
  1935. // faster.
  1936. // It also allows enhancing the visual quality by doing scaled EFB copies.
  1937. //
  1938. // - Hybrid EFB copies:
  1939. // 1a) Whenever this function gets called, encode the requested EFB data to RAM (like EFB to
  1940. // RAM)
  1941. // 1b) Set type to TCET_EC_DYNAMIC for all texture cache entries in the destination address
  1942. // range.
  1943. // If EFB copy caching is enabled, further checks will (try to) prevent redundant EFB
  1944. // copies.
  1945. // 2) Check if a texture cache entry for the specified dstAddr already exists (i.e. if an EFB
  1946. // copy was triggered to that address before):
  1947. // 2a) Entry doesn't exist:
  1948. // - Also copy the requested EFB data to a texture object in VRAM (like EFB to texture)
  1949. // - Create a texture cache entry for the target (type = TCET_EC_VRAM)
  1950. // - Store a hash of the encoded RAM data in the texcache entry.
  1951. // 2b) Entry exists AND type is TCET_EC_VRAM:
  1952. // - Like case 2a, but reuse the old texcache entry instead of creating a new one.
  1953. // 2c) Entry exists AND type is TCET_EC_DYNAMIC:
  1954. // - Only encode the texture to RAM (like EFB to RAM) and store a hash of the encoded
  1955. // data in the existing texcache entry.
  1956. // - Do NOT copy the requested EFB data to a VRAM object. Reason: the texture is dynamic,
  1957. // i.e. the CPU is modifying it. Storing a VRAM copy is useless, because we'd always end
  1958. // up deleting it and reloading the data from RAM anyway.
  1959. // 3) If the EFB copy gets used as a texture, compare the source RAM hash with the hash you
  1960. // stored when encoding the EFB data to RAM.
  1961. // 3a) If the two hashes match AND type is TCET_EC_VRAM, reuse the VRAM copy you created
  1962. // 3b) If the two hashes differ AND type is TCET_EC_VRAM, screw your existing VRAM copy. Set
  1963. // type to TCET_EC_DYNAMIC.
  1964. // Redecode the source RAM data to a VRAM object. The entry basically behaves like a
  1965. // normal texture now.
  1966. // 3c) If type is TCET_EC_DYNAMIC, treat the EFB copy like a normal texture.
  1967. // Advantage: Non-dynamic EFB copies can be visually enhanced like with EFB to texture.
  1968. // Compatibility is as good as EFB to RAM.
  1969. // Disadvantage: Slower than EFB to texture and often even slower than EFB to RAM.
  1970. // EFB copy cache depends on accurate texture hashing being enabled. However,
  1971. // with accurate hashing you end up being as slow as without a copy cache
  1972. // anyway.
  1973. //
  1974. // Disadvantage of all methods: Calling this function requires the GPU to perform a pipeline flush
  1975. // which stalls any further CPU processing.
  1976. const bool is_xfb_copy = !is_depth_copy && !isIntensity && dstFormat == EFBCopyFormat::XFB;
  1977. bool copy_to_vram =
  1978. g_ActiveConfig.backend_info.bSupportsCopyToVram && !g_ActiveConfig.bDisableCopyToVRAM;
  1979. bool copy_to_ram =
  1980. !(is_xfb_copy ? g_ActiveConfig.bSkipXFBCopyToRam : g_ActiveConfig.bSkipEFBCopyToRam) ||
  1981. !copy_to_vram;
  1982. // tex_w and tex_h are the native size of the texture in the GC memory.
  1983. // The size scaled_* represents the emulated texture. Those differ
  1984. // because of upscaling and because of yscaling of XFB copies.
  1985. // For the latter, we keep the EFB resolution for the virtual XFB blit.
  1986. u32 tex_w = width;
  1987. u32 tex_h = height;
  1988. u32 scaled_tex_w = g_framebuffer_manager->EFBToScaledX(width);
  1989. u32 scaled_tex_h = g_framebuffer_manager->EFBToScaledY(height);
  1990. if (scaleByHalf)
  1991. {
  1992. tex_w /= 2;
  1993. tex_h /= 2;
  1994. scaled_tex_w /= 2;
  1995. scaled_tex_h /= 2;
  1996. }
  1997. if (!is_xfb_copy && !g_ActiveConfig.bCopyEFBScaled)
  1998. {
  1999. // No upscaling
  2000. scaled_tex_w = tex_w;
  2001. scaled_tex_h = tex_h;
  2002. }
  2003. // Get the base (in memory) format of this efb copy.
  2004. TextureFormat baseFormat = TexDecoder_GetEFBCopyBaseFormat(dstFormat);
  2005. u32 blockH = TexDecoder_GetBlockHeightInTexels(baseFormat);
  2006. const u32 blockW = TexDecoder_GetBlockWidthInTexels(baseFormat);
  2007. // Round up source height to multiple of block size
  2008. u32 actualHeight = Common::AlignUp(tex_h, blockH);
  2009. const u32 actualWidth = Common::AlignUp(tex_w, blockW);
  2010. u32 num_blocks_y = actualHeight / blockH;
  2011. const u32 num_blocks_x = actualWidth / blockW;
  2012. // RGBA takes two cache lines per block; all others take one
  2013. const u32 bytes_per_block = baseFormat == TextureFormat::RGBA8 ? 64 : 32;
  2014. const u32 bytes_per_row = num_blocks_x * bytes_per_block;
  2015. const u32 covered_range = num_blocks_y * dstStride;
  2016. auto& system = Core::System::GetInstance();
  2017. auto& memory = system.GetMemory();
  2018. u8* dst = memory.GetPointerForRange(dstAddr, covered_range);
  2019. if (dst == nullptr)
  2020. {
  2021. ERROR_LOG_FMT(VIDEO, "Trying to copy from EFB to invalid address {:#010x}", dstAddr);
  2022. return;
  2023. }
  2024. if (g_ActiveConfig.bGraphicMods)
  2025. {
  2026. FBInfo info;
  2027. info.m_width = tex_w;
  2028. info.m_height = tex_h;
  2029. info.m_texture_format = baseFormat;
  2030. if (is_xfb_copy)
  2031. {
  2032. for (const auto& action : g_graphics_mod_manager->GetXFBActions(info))
  2033. {
  2034. action->OnXFB();
  2035. }
  2036. }
  2037. else
  2038. {
  2039. bool skip = false;
  2040. GraphicsModActionData::EFB efb{tex_w, tex_h, &skip, &scaled_tex_w, &scaled_tex_h};
  2041. for (const auto& action : g_graphics_mod_manager->GetEFBActions(info))
  2042. {
  2043. action->OnEFB(&efb);
  2044. }
  2045. if (skip == true)
  2046. {
  2047. if (copy_to_ram)
  2048. UninitializeEFBMemory(dst, dstStride, bytes_per_row, num_blocks_y);
  2049. return;
  2050. }
  2051. }
  2052. }
  2053. if (dstStride < bytes_per_row)
  2054. {
  2055. // This kind of efb copy results in a scrambled image.
  2056. // I'm pretty sure no game actually wants to do this, it might be caused by a
  2057. // programming bug in the game, or a CPU/Bounding box emulation issue with dolphin.
  2058. // The copy_to_ram code path above handles this "correctly" and scrambles the image
  2059. // but the copy_to_vram code path just saves and uses unscrambled texture instead.
  2060. // To avoid a "incorrect" result, we simply skip doing the copy_to_vram code path
  2061. // so if the game does try to use the scrambled texture, dolphin will grab the scrambled
  2062. // texture (or black if copy_to_ram is also disabled) out of ram.
  2063. ERROR_LOG_FMT(VIDEO, "Memory stride too small ({} < {})", dstStride, bytes_per_row);
  2064. copy_to_vram = false;
  2065. }
  2066. // We also linear filtering for both box filtering and downsampling higher resolutions to 1x.
  2067. // TODO: This only produces perfect downsampling for 2x IR, other resolutions will need more
  2068. // complex down filtering to average all pixels and produce the correct result.
  2069. const bool linear_filter =
  2070. !is_depth_copy &&
  2071. (scaleByHalf || g_framebuffer_manager->GetEFBScale() != 1 || y_scale > 1.0f);
  2072. RcTcacheEntry entry;
  2073. if (copy_to_vram)
  2074. {
  2075. // create the texture
  2076. const TextureConfig config(scaled_tex_w, scaled_tex_h, 1, g_framebuffer_manager->GetEFBLayers(),
  2077. 1, AbstractTextureFormat::RGBA8, AbstractTextureFlag_RenderTarget,
  2078. AbstractTextureType::Texture_2DArray);
  2079. entry = AllocateCacheEntry(config);
  2080. if (entry)
  2081. {
  2082. entry->SetGeneralParameters(dstAddr, 0, baseFormat, is_xfb_copy);
  2083. entry->SetDimensions(tex_w, tex_h, 1);
  2084. entry->frameCount = FRAMECOUNT_INVALID;
  2085. if (is_xfb_copy)
  2086. {
  2087. entry->should_force_safe_hashing = is_xfb_copy;
  2088. entry->SetXfbCopy(dstStride);
  2089. }
  2090. else
  2091. {
  2092. entry->SetEfbCopy(dstStride);
  2093. }
  2094. entry->may_have_overlapping_textures = false;
  2095. entry->is_custom_tex = false;
  2096. CopyEFBToCacheEntry(entry, is_depth_copy, srcRect, scaleByHalf, linear_filter, dstFormat,
  2097. isIntensity, gamma, clamp_top, clamp_bottom,
  2098. GetVRAMCopyFilterCoefficients(filter_coefficients));
  2099. if (is_xfb_copy && (g_ActiveConfig.bDumpXFBTarget || g_ActiveConfig.bGraphicMods))
  2100. {
  2101. const std::string id = fmt::format("{}x{}", tex_w, tex_h);
  2102. if (g_ActiveConfig.bGraphicMods)
  2103. {
  2104. entry->texture_info_name = fmt::format("{}_{}", XFB_DUMP_PREFIX, id);
  2105. }
  2106. if (g_ActiveConfig.bDumpXFBTarget)
  2107. {
  2108. entry->texture->Save(fmt::format("{}{}_n{:06}_{}.png",
  2109. File::GetUserPath(D_DUMPTEXTURES_IDX), XFB_DUMP_PREFIX,
  2110. xfb_count++, id),
  2111. 0);
  2112. }
  2113. }
  2114. else if (g_ActiveConfig.bDumpEFBTarget || g_ActiveConfig.bGraphicMods)
  2115. {
  2116. const std::string id = fmt::format("{}x{}_{}", tex_w, tex_h, static_cast<int>(baseFormat));
  2117. if (g_ActiveConfig.bGraphicMods)
  2118. {
  2119. entry->texture_info_name = fmt::format("{}_{}", EFB_DUMP_PREFIX, id);
  2120. }
  2121. if (g_ActiveConfig.bDumpEFBTarget)
  2122. {
  2123. static int efb_count = 0;
  2124. entry->texture->Save(fmt::format("{}{}_n{:06}_{}.png",
  2125. File::GetUserPath(D_DUMPTEXTURES_IDX), EFB_DUMP_PREFIX,
  2126. efb_count++, id),
  2127. 0);
  2128. }
  2129. }
  2130. }
  2131. }
  2132. if (copy_to_ram)
  2133. {
  2134. const std::array<u32, 3> coefficients = GetRAMCopyFilterCoefficients(filter_coefficients);
  2135. PixelFormat srcFormat = bpmem.zcontrol.pixel_format;
  2136. EFBCopyParams format(srcFormat, dstFormat, is_depth_copy, isIntensity,
  2137. AllCopyFilterCoefsNeeded(coefficients),
  2138. CopyFilterCanOverflow(coefficients), gamma != 1.0);
  2139. std::unique_ptr<AbstractStagingTexture> staging_texture = GetEFBCopyStagingTexture();
  2140. if (staging_texture)
  2141. {
  2142. CopyEFB(staging_texture.get(), format, tex_w, bytes_per_row, num_blocks_y, dstStride, srcRect,
  2143. scaleByHalf, linear_filter, y_scale, gamma, clamp_top, clamp_bottom, coefficients);
  2144. // We can't defer if there is no VRAM copy (since we need to update the hash).
  2145. if (!copy_to_vram || !g_ActiveConfig.bDeferEFBCopies)
  2146. {
  2147. // Immediately flush it.
  2148. WriteEFBCopyToRAM(dst, bytes_per_row / sizeof(u32), num_blocks_y, dstStride,
  2149. std::move(staging_texture));
  2150. }
  2151. else
  2152. {
  2153. // Defer the flush until later.
  2154. entry->pending_efb_copy = std::move(staging_texture);
  2155. entry->pending_efb_copy_width = bytes_per_row / sizeof(u32);
  2156. entry->pending_efb_copy_height = num_blocks_y;
  2157. m_pending_efb_copies.push_back(entry);
  2158. }
  2159. }
  2160. }
  2161. else
  2162. {
  2163. if (is_xfb_copy)
  2164. {
  2165. UninitializeXFBMemory(dst, dstStride, bytes_per_row, num_blocks_y);
  2166. }
  2167. else
  2168. {
  2169. UninitializeEFBMemory(dst, dstStride, bytes_per_row, num_blocks_y);
  2170. }
  2171. }
  2172. // Invalidate all textures, if they are either fully overwritten by our efb copy, or if they
  2173. // have a different stride than our efb copy. Partly overwritten textures with the same stride
  2174. // as our efb copy are marked to check them for partial texture updates.
  2175. // TODO: The logic to detect overlapping strided efb copies is not 100% accurate.
  2176. bool strided_efb_copy = dstStride != bytes_per_row;
  2177. auto iter = FindOverlappingTextures(dstAddr, covered_range);
  2178. while (iter.first != iter.second)
  2179. {
  2180. RcTcacheEntry& overlapping_entry = iter.first->second;
  2181. if (overlapping_entry->addr == dstAddr && overlapping_entry->is_xfb_copy)
  2182. {
  2183. for (auto& reference : overlapping_entry->references)
  2184. {
  2185. reference->reference_changed = true;
  2186. }
  2187. }
  2188. if (overlapping_entry->OverlapsMemoryRange(dstAddr, covered_range))
  2189. {
  2190. u32 overlap_range = std::min(overlapping_entry->addr + overlapping_entry->size_in_bytes,
  2191. dstAddr + covered_range) -
  2192. std::max(overlapping_entry->addr, dstAddr);
  2193. if (!copy_to_vram || overlapping_entry->memory_stride != dstStride ||
  2194. (!strided_efb_copy && overlapping_entry->size_in_bytes == overlap_range) ||
  2195. (strided_efb_copy && overlapping_entry->size_in_bytes == overlap_range &&
  2196. overlapping_entry->addr == dstAddr))
  2197. {
  2198. // Pending EFB copies which are completely covered by this new copy can simply be tossed,
  2199. // instead of having to flush them later on, since this copy will write over everything.
  2200. iter.first = InvalidateTexture(iter.first, true);
  2201. continue;
  2202. }
  2203. // We don't want to change the may_have_overlapping_textures flag on XFB container entries
  2204. // because otherwise they can't be re-used/updated, leaking textures for several frames.
  2205. if (!overlapping_entry->is_xfb_container)
  2206. overlapping_entry->may_have_overlapping_textures = true;
  2207. // There are cases (Rogue Squadron 2 / Texas Holdem on Wiiware) where
  2208. // for xfb copies the textures overlap which causes the hash of the first copy
  2209. // to be different (from when it was originally created). This has no implications
  2210. // for XFB2Tex because the underlying memory doesn't change (dummy values) but
  2211. // can affect XFB2Ram when we compare the texture cache copy hash with the
  2212. // newly computed hash
  2213. // By calculating the hash when we receive overlapping xfbs, we are able
  2214. // to mitigate this
  2215. if (overlapping_entry->is_xfb_copy && copy_to_ram)
  2216. {
  2217. overlapping_entry->hash = overlapping_entry->CalculateHash();
  2218. }
  2219. // Do not load textures by hash, if they were at least partly overwritten by an efb copy.
  2220. // In this case, comparing the hash is not enough to check, if two textures are identical.
  2221. if (overlapping_entry->textures_by_hash_iter != m_textures_by_hash.end())
  2222. {
  2223. m_textures_by_hash.erase(overlapping_entry->textures_by_hash_iter);
  2224. overlapping_entry->textures_by_hash_iter = m_textures_by_hash.end();
  2225. }
  2226. }
  2227. ++iter.first;
  2228. }
  2229. if (OpcodeDecoder::g_record_fifo_data)
  2230. {
  2231. // Mark the memory behind this efb copy as dynamicly generated for the Fifo log
  2232. u32 address = dstAddr;
  2233. for (u32 i = 0; i < num_blocks_y; i++)
  2234. {
  2235. Core::System::GetInstance().GetFifoRecorder().UseMemory(address, bytes_per_row,
  2236. MemoryUpdate::Type::TextureMap, true);
  2237. address += dstStride;
  2238. }
  2239. }
  2240. // Even if the copy is deferred, still compute the hash. This way if the copy is used as a texture
  2241. // in a subsequent draw before it is flushed, it will have the same hash.
  2242. if (entry)
  2243. {
  2244. const u64 hash = entry->CalculateHash();
  2245. entry->SetHashes(hash, hash);
  2246. m_textures_by_address.emplace(dstAddr, std::move(entry));
  2247. }
  2248. }
  2249. void TextureCacheBase::FlushEFBCopies()
  2250. {
  2251. if (m_pending_efb_copies.empty())
  2252. return;
  2253. for (auto& entry : m_pending_efb_copies)
  2254. FlushEFBCopy(entry.get());
  2255. m_pending_efb_copies.clear();
  2256. }
  2257. void TextureCacheBase::FlushStaleBinds()
  2258. {
  2259. for (u32 i = 0; i < m_bound_textures.size(); i++)
  2260. {
  2261. if (!TMEM::IsCached(i))
  2262. m_bound_textures[i].reset();
  2263. }
  2264. }
  2265. void TextureCacheBase::WriteEFBCopyToRAM(u8* dst_ptr, u32 width, u32 height, u32 stride,
  2266. std::unique_ptr<AbstractStagingTexture> staging_texture)
  2267. {
  2268. MathUtil::Rectangle<int> copy_rect(0, 0, static_cast<int>(width), static_cast<int>(height));
  2269. staging_texture->ReadTexels(copy_rect, dst_ptr, stride);
  2270. ReleaseEFBCopyStagingTexture(std::move(staging_texture));
  2271. }
// Flushes a single deferred EFB copy: writes its staging texture out to guest
// memory, then (unless the entry was invalidated in the meantime) recomputes
// its hash from the now-populated RAM contents.
void TextureCacheBase::FlushEFBCopy(TCacheEntry* entry)
{
  // Total bytes covered in guest memory: one stride per encoded block row.
  const u32 covered_range = entry->pending_efb_copy_height * entry->memory_stride;

  // Copy from texture -> guest memory.
  auto& system = Core::System::GetInstance();
  auto& memory = system.GetMemory();
  u8* const dst = memory.GetPointerForRange(entry->addr, covered_range);
  WriteEFBCopyToRAM(dst, entry->pending_efb_copy_width, entry->pending_efb_copy_height,
                    entry->memory_stride, std::move(entry->pending_efb_copy));

  // If the EFB copy was invalidated (e.g. the bloom case mentioned in InvalidateTexture), we
  // don't need to do anything more. The entry will be automatically deleted by smart pointers
  if (entry->invalidated)
    return;

  // Re-hash the texture now that the guest memory is populated.
  // This should be safe because we'll catch any writes before the game can modify it.
  const u64 hash = entry->CalculateHash();
  entry->SetHashes(hash, hash);

  // Check for any overlapping XFB copies which now need the hash recomputed.
  // See the comment above regarding Rogue Squadron 2.
  if (entry->is_xfb_copy)
  {
    auto range = FindOverlappingTextures(entry->addr, covered_range);
    for (auto iter = range.first; iter != range.second; ++iter)
    {
      auto& overlapping_entry = iter->second;
      if (overlapping_entry->may_have_overlapping_textures && overlapping_entry->is_xfb_copy &&
          overlapping_entry->OverlapsMemoryRange(entry->addr, covered_range))
      {
        const u64 overlapping_hash = overlapping_entry->CalculateHash();
        // NOTE(review): this re-hashes the *flushed* entry with each overlapping
        // entry's hash (last one wins), even though the comment above says the
        // overlapping copies need their hash recomputed. Confirm whether this
        // was intended to be overlapping_entry->SetHashes(...).
        entry->SetHashes(overlapping_hash, overlapping_hash);
      }
    }
  }
}
  2306. std::unique_ptr<AbstractStagingTexture> TextureCacheBase::GetEFBCopyStagingTexture()
  2307. {
  2308. // Pull off the back first to re-use the most frequently used textures.
  2309. if (!m_efb_copy_staging_texture_pool.empty())
  2310. {
  2311. auto ptr = std::move(m_efb_copy_staging_texture_pool.back());
  2312. m_efb_copy_staging_texture_pool.pop_back();
  2313. return ptr;
  2314. }
  2315. std::unique_ptr<AbstractStagingTexture> tex = g_gfx->CreateStagingTexture(
  2316. StagingTextureType::Readback, m_efb_encoding_texture->GetConfig());
  2317. if (!tex)
  2318. WARN_LOG_FMT(VIDEO, "Failed to create EFB copy staging texture");
  2319. return tex;
  2320. }
// Returns a staging texture to the pool so GetEFBCopyStagingTexture() can
// hand it out again instead of allocating a new one.
void TextureCacheBase::ReleaseEFBCopyStagingTexture(std::unique_ptr<AbstractStagingTexture> tex)
{
  m_efb_copy_staging_texture_pool.push_back(std::move(tex));
}
  2325. void TextureCacheBase::UninitializeEFBMemory(u8* dst, u32 stride, u32 bytes_per_row,
  2326. u32 num_blocks_y)
  2327. {
  2328. // Hack: Most games don't actually need the correct texture data in RAM
  2329. // and we can just keep a copy in VRAM. We zero the memory so we
  2330. // can check it hasn't changed before using our copy in VRAM.
  2331. u8* ptr = dst;
  2332. for (u32 i = 0; i < num_blocks_y; i++)
  2333. {
  2334. std::memset(ptr, 0, bytes_per_row);
  2335. ptr += stride;
  2336. }
  2337. }
// Fills the guest-RAM region behind an XFB copy with a marker pattern instead
// of the real pixel data (which is kept only in VRAM).
void TextureCacheBase::UninitializeXFBMemory(u8* dst, u32 stride, u32 bytes_per_row,
                                             u32 num_blocks_y)
{
  // Originally, we planned on using a 'key color'
  // for alpha to address partial xfbs (Mario Strikers / Chicken Little).
  // This work was removed since it was unfinished but there
  // was still a desire to differentiate between the old and the new approach
  // which is why we still set uninitialized xfb memory to fuchsia
  // (Y=1,U=254,V=254) instead of dark green (Y=0,U=0,V=0) in YUV
  // like is done in the EFB path.

#if defined(_M_X86_64)
  // On little-endian x86, replicating the 16-bit value 0xFE01 yields the byte
  // pattern 0x01, 0xFE, ... - the same bytes the scalar loop below writes.
  __m128i sixteenBytes = _mm_set1_epi16((s16)(u16)0xFE01);
#endif

  for (u32 i = 0; i < num_blocks_y; i++)
  {
    u32 size = bytes_per_row;
    u8* rowdst = dst;
#if defined(_M_X86_64)
    // SSE2 fast path: store 16 bytes at a time; rowdst/size are advanced so
    // the scalar loop below only handles the (< 16 byte) remainder.
    while (size >= 16)
    {
      _mm_storeu_si128((__m128i*)rowdst, sixteenBytes);
      size -= 16;
      rowdst += 16;
    }
#endif
    // Scalar path/tail: even byte offsets get 1, odd offsets get 254,
    // producing the (1, 254) pattern described above.
    for (u32 offset = 0; offset < size; offset++)
    {
      if (offset & 1)
      {
        rowdst[offset] = 254;
      }
      else
      {
        rowdst[offset] = 1;
      }
    }
    // Rows are stride bytes apart; only bytes_per_row bytes per row are filled.
    dst += stride;
  }
}
  2377. RcTcacheEntry TextureCacheBase::AllocateCacheEntry(const TextureConfig& config)
  2378. {
  2379. std::optional<TexPoolEntry> alloc = AllocateTexture(config);
  2380. if (!alloc)
  2381. return {};
  2382. auto cacheEntry =
  2383. std::make_shared<TCacheEntry>(std::move(alloc->texture), std::move(alloc->framebuffer));
  2384. cacheEntry->textures_by_hash_iter = m_textures_by_hash.end();
  2385. cacheEntry->id = m_last_entry_id++;
  2386. return cacheEntry;
  2387. }
  2388. std::optional<TextureCacheBase::TexPoolEntry>
  2389. TextureCacheBase::AllocateTexture(const TextureConfig& config)
  2390. {
  2391. TexPool::iterator iter = FindMatchingTextureFromPool(config);
  2392. if (iter != m_texture_pool.end())
  2393. {
  2394. auto entry = std::move(iter->second);
  2395. m_texture_pool.erase(iter);
  2396. return std::move(entry);
  2397. }
  2398. std::unique_ptr<AbstractTexture> texture = g_gfx->CreateTexture(config);
  2399. if (!texture)
  2400. {
  2401. WARN_LOG_FMT(VIDEO, "Failed to allocate a {}x{}x{} texture", config.width, config.height,
  2402. config.layers);
  2403. return {};
  2404. }
  2405. std::unique_ptr<AbstractFramebuffer> framebuffer;
  2406. if (config.IsRenderTarget())
  2407. {
  2408. framebuffer = g_gfx->CreateFramebuffer(texture.get(), nullptr);
  2409. if (!framebuffer)
  2410. {
  2411. WARN_LOG_FMT(VIDEO, "Failed to allocate a {}x{}x{} framebuffer", config.width, config.height,
  2412. config.layers);
  2413. return {};
  2414. }
  2415. }
  2416. INCSTAT(g_stats.num_textures_created);
  2417. return TexPoolEntry(std::move(texture), std::move(framebuffer));
  2418. }
  2419. TextureCacheBase::TexPool::iterator
  2420. TextureCacheBase::FindMatchingTextureFromPool(const TextureConfig& config)
  2421. {
  2422. // Find a texture from the pool that does not have a frameCount of FRAMECOUNT_INVALID.
  2423. // This prevents a texture from being used twice in a single frame with different data,
  2424. // which potentially means that a driver has to maintain two copies of the texture anyway.
  2425. // Render-target textures are fine through, as they have to be generated in a seperated pass.
  2426. // As non-render-target textures are usually static, this should not matter much.
  2427. auto range = m_texture_pool.equal_range(config);
  2428. auto matching_iter = std::find_if(range.first, range.second, [](const auto& iter) {
  2429. return iter.first.IsRenderTarget() || iter.second.frameCount != FRAMECOUNT_INVALID;
  2430. });
  2431. return matching_iter != range.second ? matching_iter : m_texture_pool.end();
  2432. }
  2433. TextureCacheBase::TexAddrCache::iterator TextureCacheBase::GetTexCacheIter(TCacheEntry* entry)
  2434. {
  2435. auto iter_range = m_textures_by_address.equal_range(entry->addr);
  2436. TexAddrCache::iterator iter = iter_range.first;
  2437. while (iter != iter_range.second)
  2438. {
  2439. if (iter->second.get() == entry)
  2440. {
  2441. return iter;
  2442. }
  2443. ++iter;
  2444. }
  2445. return m_textures_by_address.end();
  2446. }
  2447. std::pair<TextureCacheBase::TexAddrCache::iterator, TextureCacheBase::TexAddrCache::iterator>
  2448. TextureCacheBase::FindOverlappingTextures(u32 addr, u32 size_in_bytes)
  2449. {
  2450. // We index by the starting address only, so there is no way to query all textures
  2451. // which end after the given addr. But the GC textures have a limited size, so we
  2452. // look for all textures which have a start address bigger than addr minus the maximal
  2453. // texture size. But this yields false-positives which must be checked later on.
  2454. // 1024 x 1024 texel times 8 nibbles per texel
  2455. constexpr u32 max_texture_size = 1024 * 1024 * 4;
  2456. u32 lower_addr = addr > max_texture_size ? addr - max_texture_size : 0;
  2457. auto begin = m_textures_by_address.lower_bound(lower_addr);
  2458. auto end = m_textures_by_address.upper_bound(addr + size_in_bytes);
  2459. return std::make_pair(begin, end);
  2460. }
// Removes the entry at 'iter' from the address-keyed cache (and the hash
// index), handling any deferred EFB copy it still owns. Returns the iterator
// to the next entry, so callers can invalidate while iterating.
TextureCacheBase::TexAddrCache::iterator
TextureCacheBase::InvalidateTexture(TexAddrCache::iterator iter, bool discard_pending_efb_copy)
{
  if (iter == m_textures_by_address.end())
    return m_textures_by_address.end();

  RcTcacheEntry& entry = iter->second;

  // Remove the secondary hash-keyed index entry, if one exists.
  if (entry->textures_by_hash_iter != m_textures_by_hash.end())
  {
    m_textures_by_hash.erase(entry->textures_by_hash_iter);
    entry->textures_by_hash_iter = m_textures_by_hash.end();
  }

  // If this is a pending EFB copy, we don't want to flush it here.
  // Why? Because let's say a game is rendering a bloom-type effect, using EFB copies to
  // essentially downscale the framebuffer. Copy from EFB->Texture, draw texture to EFB, copy
  // EFB->Texture, draw, repeat. The second copy will invalidate the first, forcing a flush.
  // Which means we lose any benefit of EFB copy batching. So instead, let's just leave the EFB
  // copy pending, but remove it from the texture cache. This way we don't use the old VRAM copy.
  // When the EFB copies are eventually flushed, they will overwrite each other, and the end
  // result should be the same.
  if (entry->pending_efb_copy)
  {
    if (discard_pending_efb_copy)
    {
      // If the RAM copy is being completely overwritten by a new EFB copy, we can discard the
      // existing pending copy, and not bother waiting for it in the future. This happens in
      // Xenoblade's sunset scene, where 35 copies are done per frame, and 25 of them are
      // copied to the same address, and can be skipped.
      ReleaseEFBCopyStagingTexture(std::move(entry->pending_efb_copy));
      auto pending_it = std::ranges::find(m_pending_efb_copies, entry);
      if (pending_it != m_pending_efb_copies.end())
        m_pending_efb_copies.erase(pending_it);
    }
    else
    {
      // The texture data has already been copied into the staging texture, so it's valid to
      // optimistically release the texture data. Will slightly lower VRAM usage.
      if (!entry->IsLocked())
        ReleaseToPool(entry.get());
    }
  }

  // Mark the entry so a later FlushEFBCopy() knows to skip the re-hash step.
  entry->invalidated = true;

  return m_textures_by_address.erase(iter);
}
  2503. void TextureCacheBase::ReleaseToPool(TCacheEntry* entry)
  2504. {
  2505. if (!entry->texture)
  2506. return;
  2507. auto config = entry->texture->GetConfig();
  2508. m_texture_pool.emplace(config,
  2509. TexPoolEntry(std::move(entry->texture), std::move(entry->framebuffer)));
  2510. }
// Creates the internal helper textures used by the texture cache: the render
// target the EFB-copy encoding shaders write to, and (when the backend
// supports it) the compute-image target used for GPU texture decoding.
// Returns false if any allocation fails.
bool TextureCacheBase::CreateUtilityTextures()
{
  // Render target for the EFB->RAM encoding shaders.
  constexpr TextureConfig encoding_texture_config(
      EFB_WIDTH * 4, 1024, 1, 1, 1, AbstractTextureFormat::BGRA8, AbstractTextureFlag_RenderTarget,
      AbstractTextureType::Texture_2DArray);
  m_efb_encoding_texture = g_gfx->CreateTexture(encoding_texture_config, "EFB encoding texture");
  if (!m_efb_encoding_texture)
    return false;

  m_efb_encoding_framebuffer = g_gfx->CreateFramebuffer(m_efb_encoding_texture.get(), nullptr);
  if (!m_efb_encoding_framebuffer)
    return false;

  if (g_ActiveConfig.backend_info.bSupportsGPUTextureDecoding)
  {
    // Compute-image target that the GPU texture decoding shaders write into.
    constexpr TextureConfig decoding_texture_config(
        1024, 1024, 1, 1, 1, AbstractTextureFormat::RGBA8, AbstractTextureFlag_ComputeImage,
        AbstractTextureType::Texture_2DArray);
    m_decoding_texture =
        g_gfx->CreateTexture(decoding_texture_config, "GPU texture decoding texture");
    if (!m_decoding_texture)
      return false;
  }

  return true;
}
void TextureCacheBase::CopyEFBToCacheEntry(RcTcacheEntry& entry, bool is_depth_copy,
                                           const MathUtil::Rectangle<int>& src_rect,
                                           bool scale_by_half, bool linear_filter,
                                           EFBCopyFormat dst_format, bool is_intensity, float gamma,
                                           bool clamp_top, bool clamp_bottom,
                                           const std::array<u32, 3>& filter_coefficients)
{
  // Performs an EFB→VRAM copy: renders the requested EFB region into the cache
  // entry's texture using a format-conversion pipeline, applying gamma
  // correction, vertical filtering, and optional top/bottom clamping.

  // Flush EFB pokes first, as they're expected to be included.
  g_framebuffer_manager->FlushEFBPokes();

  // Get the pipeline which we will be using. If the compilation failed, this will be null.
  const AbstractPipeline* copy_pipeline = g_shader_cache->GetEFBCopyToVRAMPipeline(
      TextureConversionShaderGen::GetShaderUid(dst_format, is_depth_copy, is_intensity,
                                               scale_by_half, 1.0f / gamma, filter_coefficients));
  if (!copy_pipeline)
  {
    WARN_LOG_FMT(VIDEO, "Skipping EFB copy to VRAM due to missing pipeline.");
    return;
  }

  // Map the EFB-space rectangle to internal-resolution framebuffer coordinates,
  // then resolve multisampling (if any) so the source can be sampled as a texture.
  const auto scaled_src_rect = g_framebuffer_manager->ConvertEFBRectangle(src_rect);
  const auto framebuffer_rect = g_gfx->ConvertFramebufferRectangle(
      scaled_src_rect, g_framebuffer_manager->GetEFBFramebuffer());
  AbstractTexture* src_texture =
      is_depth_copy ? g_framebuffer_manager->ResolveEFBDepthTexture(framebuffer_rect) :
                      g_framebuffer_manager->ResolveEFBColorTexture(framebuffer_rect);

  src_texture->FinishedRendering();
  g_gfx->BeginUtilityDrawing();

  // Fill uniform buffer. Layout must match the copy shader's uniform block.
  struct Uniforms
  {
    float src_left, src_top, src_width, src_height;
    std::array<u32, 3> filter_coefficients;
    float gamma_rcp;
    float clamp_top;
    float clamp_bottom;
    float pixel_height;
    u32 padding;
  };
  Uniforms uniforms;
  // Source rectangle expressed in normalized [0,1] texture coordinates.
  const float rcp_efb_width = 1.0f / static_cast<float>(g_framebuffer_manager->GetEFBWidth());
  const u32 efb_height = g_framebuffer_manager->GetEFBHeight();
  const float rcp_efb_height = 1.0f / static_cast<float>(efb_height);
  uniforms.src_left = framebuffer_rect.left * rcp_efb_width;
  uniforms.src_top = framebuffer_rect.top * rcp_efb_height;
  uniforms.src_width = framebuffer_rect.GetWidth() * rcp_efb_width;
  uniforms.src_height = framebuffer_rect.GetHeight() * rcp_efb_height;
  uniforms.filter_coefficients = filter_coefficients;
  uniforms.gamma_rcp = 1.0f / gamma;
  // NOTE: when the clamp bits aren't set, the hardware will happily read beyond the EFB,
  // which returns random garbage from the empty bus (confirmed by hardware tests).
  //
  // In our implementation, the garbage just so happens to be the top or bottom row.
  // Statistically, that could happen.
  // Clamp coordinates are normalized texel centers (+0.5) of the allowed rows.
  const u32 top_coord = clamp_top ? framebuffer_rect.top : 0;
  uniforms.clamp_top = (static_cast<float>(top_coord) + .5f) * rcp_efb_height;
  const u32 bottom_coord = (clamp_bottom ? framebuffer_rect.bottom : efb_height) - 1;
  uniforms.clamp_bottom = (static_cast<float>(bottom_coord) + .5f) * rcp_efb_height;
  // Filtering offsets are in EFB pixels when copies are scaled, native pixels otherwise.
  uniforms.pixel_height = g_ActiveConfig.bCopyEFBScaled ? rcp_efb_height : 1.0f / EFB_HEIGHT;
  uniforms.padding = 0;
  g_vertex_manager->UploadUtilityUniforms(&uniforms, sizeof(uniforms));

  // Use the copy pipeline to render the VRAM copy (full-screen triangle).
  g_gfx->SetAndDiscardFramebuffer(entry->framebuffer.get());
  g_gfx->SetViewportAndScissor(entry->framebuffer->GetRect());
  g_gfx->SetPipeline(copy_pipeline);
  g_gfx->SetTexture(0, src_texture);
  g_gfx->SetSamplerState(0, linear_filter ? RenderState::GetLinearSamplerState() :
                                            RenderState::GetPointSamplerState());
  g_gfx->Draw(0, 3);
  g_gfx->EndUtilityDrawing();
  entry->texture->FinishedRendering();
}
  2604. void TextureCacheBase::CopyEFB(AbstractStagingTexture* dst, const EFBCopyParams& params,
  2605. u32 native_width, u32 bytes_per_row, u32 num_blocks_y,
  2606. u32 memory_stride, const MathUtil::Rectangle<int>& src_rect,
  2607. bool scale_by_half, bool linear_filter, float y_scale, float gamma,
  2608. bool clamp_top, bool clamp_bottom,
  2609. const std::array<u32, 3>& filter_coefficients)
  2610. {
  2611. // Flush EFB pokes first, as they're expected to be included.
  2612. g_framebuffer_manager->FlushEFBPokes();
  2613. // Get the pipeline which we will be using. If the compilation failed, this will be null.
  2614. const AbstractPipeline* copy_pipeline = g_shader_cache->GetEFBCopyToRAMPipeline(params);
  2615. if (!copy_pipeline)
  2616. {
  2617. WARN_LOG_FMT(VIDEO, "Skipping EFB copy to VRAM due to missing pipeline.");
  2618. return;
  2619. }
  2620. const auto scaled_src_rect = g_framebuffer_manager->ConvertEFBRectangle(src_rect);
  2621. const auto framebuffer_rect = g_gfx->ConvertFramebufferRectangle(
  2622. scaled_src_rect, g_framebuffer_manager->GetEFBFramebuffer());
  2623. AbstractTexture* src_texture =
  2624. params.depth ? g_framebuffer_manager->ResolveEFBDepthTexture(framebuffer_rect) :
  2625. g_framebuffer_manager->ResolveEFBColorTexture(framebuffer_rect);
  2626. src_texture->FinishedRendering();
  2627. g_gfx->BeginUtilityDrawing();
  2628. // Fill uniform buffer.
  2629. struct Uniforms
  2630. {
  2631. std::array<s32, 4> position_uniform;
  2632. float y_scale;
  2633. float gamma_rcp;
  2634. float clamp_top;
  2635. float clamp_bottom;
  2636. std::array<u32, 3> filter_coefficients;
  2637. u32 padding;
  2638. };
  2639. Uniforms encoder_params;
  2640. const u32 efb_height = g_framebuffer_manager->GetEFBHeight();
  2641. const float rcp_efb_height = 1.0f / static_cast<float>(efb_height);
  2642. encoder_params.position_uniform[0] = src_rect.left;
  2643. encoder_params.position_uniform[1] = src_rect.top;
  2644. encoder_params.position_uniform[2] = static_cast<s32>(native_width);
  2645. encoder_params.position_uniform[3] = scale_by_half ? 2 : 1;
  2646. encoder_params.y_scale = y_scale;
  2647. encoder_params.gamma_rcp = 1.0f / gamma;
  2648. // NOTE: when the clamp bits aren't set, the hardware will happily read beyond the EFB,
  2649. // which returns random garbage from the empty bus (confirmed by hardware tests).
  2650. //
  2651. // In our implementation, the garbage just so happens to be the top or bottom row.
  2652. // Statistically, that could happen.
  2653. const u32 top_coord = clamp_top ? framebuffer_rect.top : 0;
  2654. encoder_params.clamp_top = (static_cast<float>(top_coord) + .5f) * rcp_efb_height;
  2655. const u32 bottom_coord = (clamp_bottom ? framebuffer_rect.bottom : efb_height) - 1;
  2656. encoder_params.clamp_bottom = (static_cast<float>(bottom_coord) + .5f) * rcp_efb_height;
  2657. encoder_params.filter_coefficients = filter_coefficients;
  2658. g_vertex_manager->UploadUtilityUniforms(&encoder_params, sizeof(encoder_params));
  2659. // Because the shader uses gl_FragCoord and we read it back, we must render to the lower-left.
  2660. const u32 render_width = bytes_per_row / sizeof(u32);
  2661. const u32 render_height = num_blocks_y;
  2662. const auto encode_rect = MathUtil::Rectangle<int>(0, 0, render_width, render_height);
  2663. // Render to GPU texture, and then copy to CPU-accessible texture.
  2664. g_gfx->SetAndDiscardFramebuffer(m_efb_encoding_framebuffer.get());
  2665. g_gfx->SetViewportAndScissor(encode_rect);
  2666. g_gfx->SetPipeline(copy_pipeline);
  2667. g_gfx->SetTexture(0, src_texture);
  2668. g_gfx->SetSamplerState(0, linear_filter ? RenderState::GetLinearSamplerState() :
  2669. RenderState::GetPointSamplerState());
  2670. g_gfx->Draw(0, 3);
  2671. dst->CopyFromTexture(m_efb_encoding_texture.get(), encode_rect, 0, 0, encode_rect);
  2672. g_gfx->EndUtilityDrawing();
  2673. // Flush if there's sufficient draws between this copy and the last.
  2674. g_vertex_manager->OnEFBCopyToRAM();
  2675. }
bool TextureCacheBase::DecodeTextureOnGPU(RcTcacheEntry& entry, u32 dst_level, const u8* data,
                                          u32 data_size, TextureFormat format, u32 width,
                                          u32 height, u32 aligned_width, u32 aligned_height,
                                          u32 row_stride, const u8* palette,
                                          TLUTFormat palette_format)
{
  // Decodes a GX-format texture on the GPU with a compute shader, writing the
  // result into mip level dst_level of the cache entry's texture. Returns
  // false if the format has no decoding shader, the shader failed to compile,
  // or texel-buffer space could not be allocated (caller falls back to CPU
  // decoding in those cases).
  const auto* info = TextureConversionShaderTiled::GetDecodingShaderInfo(format);
  if (!info)
    return false;

  // Palette-based formats need the TLUT format baked into the shader.
  const AbstractShader* shader = g_shader_cache->GetTextureDecodingShader(
      format, info->palette_size != 0 ? std::make_optional(palette_format) : std::nullopt);
  if (!shader)
    return false;

  // Copy to GPU-visible buffer, aligned to the data type.
  const u32 bytes_per_buffer_elem =
      VertexManagerBase::GetTexelBufferElementSize(info->buffer_format);

  // Allocate space in stream buffer, and copy texture + palette across.
  u32 src_offset = 0, palette_offset = 0;
  if (info->palette_size > 0)
  {
    if (!g_vertex_manager->UploadTexelBuffer(data, data_size, info->buffer_format, &src_offset,
                                             palette, info->palette_size,
                                             TEXEL_BUFFER_FORMAT_R16_UINT, &palette_offset))
    {
      return false;
    }
  }
  else
  {
    if (!g_vertex_manager->UploadTexelBuffer(data, data_size, info->buffer_format, &src_offset))
      return false;
  }

  // Set up uniforms. Offsets/strides are in texel-buffer elements, not bytes.
  // The trailing `unused` member is value-initialized to 0 by aggregate init.
  struct Uniforms
  {
    u32 dst_width, dst_height;
    u32 src_width, src_height;
    u32 src_offset, src_row_stride;
    u32 palette_offset, unused;
  } uniforms = {width,          height,     aligned_width,
                aligned_height, src_offset, row_stride / bytes_per_buffer_elem,
                palette_offset};
  g_vertex_manager->UploadUtilityUniforms(&uniforms, sizeof(uniforms));
  g_gfx->SetComputeImageTexture(0, m_decoding_texture.get(), false, true);

  // Dispatch enough thread groups to cover the aligned (block-rounded) size.
  auto dispatch_groups =
      TextureConversionShaderTiled::GetDispatchCount(info, aligned_width, aligned_height);
  g_gfx->DispatchComputeShader(shader, info->group_size_x, info->group_size_y, 1,
                               dispatch_groups.first, dispatch_groups.second, 1);

  // Copy from decoding texture -> final texture
  // This is because we don't want to have to create compute view for every layer
  const auto copy_rect = entry->texture->GetConfig().GetMipRect(dst_level);
  entry->texture->CopyRectangleFromTexture(m_decoding_texture.get(), copy_rect, 0, 0, copy_rect, 0,
                                           dst_level);
  entry->texture->FinishedRendering();
  return true;
}
  2732. u32 TCacheEntry::BytesPerRow() const
  2733. {
  2734. // RGBA takes two cache lines per block; all others take one
  2735. const u32 bytes_per_block = format == TextureFormat::RGBA8 ? 64 : 32;
  2736. return NumBlocksX() * bytes_per_block;
  2737. }
u32 TCacheEntry::NumBlocksX() const
{
  const u32 blockW = TexDecoder_GetBlockWidthInTexels(format.texfmt);
  // Round up source width to multiple of block size
  // (original comment said "height" — this function handles the X axis)
  const u32 actualWidth = Common::AlignUp(native_width, blockW);
  return actualWidth / blockW;
}
  2745. u32 TCacheEntry::NumBlocksY() const
  2746. {
  2747. u32 blockH = TexDecoder_GetBlockHeightInTexels(format.texfmt);
  2748. // Round up source height to multiple of block size
  2749. u32 actualHeight = Common::AlignUp(native_height, blockH);
  2750. return actualHeight / blockH;
  2751. }
  2752. void TCacheEntry::SetXfbCopy(u32 stride)
  2753. {
  2754. is_efb_copy = false;
  2755. is_xfb_copy = true;
  2756. is_xfb_container = false;
  2757. memory_stride = stride;
  2758. ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");
  2759. size_in_bytes = memory_stride * NumBlocksY();
  2760. }
  2761. void TCacheEntry::SetEfbCopy(u32 stride)
  2762. {
  2763. is_efb_copy = true;
  2764. is_xfb_copy = false;
  2765. is_xfb_container = false;
  2766. memory_stride = stride;
  2767. ASSERT_MSG(VIDEO, memory_stride >= BytesPerRow(), "Memory stride is too small");
  2768. size_in_bytes = memory_stride * NumBlocksY();
  2769. }
  2770. void TCacheEntry::SetNotCopy()
  2771. {
  2772. is_efb_copy = false;
  2773. is_xfb_copy = false;
  2774. is_xfb_container = false;
  2775. }
  2776. int TCacheEntry::HashSampleSize() const
  2777. {
  2778. if (should_force_safe_hashing)
  2779. {
  2780. return 0;
  2781. }
  2782. return g_ActiveConfig.iSafeTextureCache_ColorSamples;
  2783. }
u64 TCacheEntry::CalculateHash() const
{
  // Hashes the guest-memory backing of this entry so the cache can detect
  // when the game has overwritten it. When the memory stride matches the row
  // size the region is contiguous and hashed in one go; otherwise each row of
  // blocks is hashed separately and mixed together.
  const u32 bytes_per_row = BytesPerRow();
  const u32 hash_sample_size = HashSampleSize();

  // FIXME: textures from tmem won't get the correct hash.
  auto& system = Core::System::GetInstance();
  auto& memory = system.GetMemory();
  u8* ptr = memory.GetPointerForRange(addr, size_in_bytes);
  if (memory_stride == bytes_per_row)
  {
    return Common::GetHash64(ptr, size_in_bytes, hash_sample_size);
  }
  else
  {
    const u32 num_blocks_y = NumBlocksY();
    // Seed with the size so regions of different extent hash differently.
    u64 temp_hash = size_in_bytes;

    u32 samples_per_row = 0;
    if (hash_sample_size != 0)
    {
      // Hash at least 4 samples per row to avoid hashing in a bad pattern, like just on the left
      // side of the efb copy
      samples_per_row = std::max(hash_sample_size / num_blocks_y, 4u);
    }

    for (u32 i = 0; i < num_blocks_y; i++)
    {
      // Multiply by a prime number to mix the hash up a bit. This prevents identical blocks from
      // canceling each other out
      temp_hash = (temp_hash * 397) ^ Common::GetHash64(ptr, bytes_per_row, samples_per_row);
      // Advance by the full stride, skipping the gap between rows.
      ptr += memory_stride;
    }
    return temp_hash;
  }
}
// Takes ownership of a texture and its (possibly null) framebuffer so the
// pair can be parked in the texture pool for later reuse.
TextureCacheBase::TexPoolEntry::TexPoolEntry(std::unique_ptr<AbstractTexture> tex,
                                             std::unique_ptr<AbstractFramebuffer> fb)
    : texture(std::move(tex)), framebuffer(std::move(fb))
{
}