/**************************************************************************/
/*  rendering_device_driver_metal.mm                                      */
/**************************************************************************/
/* This file is part of:                                                  */
/* GODOT ENGINE                                                           */
/* https://godotengine.org                                                */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/
/**************************************************************************/
/*                                                                        */
/* Portions of this code were derived from MoltenVK.                      */
/*                                                                        */
/* Copyright (c) 2015-2023 The Brenwill Workshop Ltd.                     */
/* (http://www.brenwill.com)                                              */
/*                                                                        */
/* Licensed under the Apache License, Version 2.0 (the "License");        */
/* you may not use this file except in compliance with the License.       */
/* You may obtain a copy of the License at                                */
/*                                                                        */
/*     http://www.apache.org/licenses/LICENSE-2.0                         */
/*                                                                        */
/* Unless required by applicable law or agreed to in writing, software    */
/* distributed under the License is distributed on an "AS IS" BASIS,      */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or        */
/* implied. See the License for the specific language governing           */
/* permissions and limitations under the License.                         */
/**************************************************************************/

#import "rendering_device_driver_metal.h"

#import "pixel_formats.h"
#import "rendering_context_driver_metal.h"

#import "core/io/compression.h"
#import "core/io/marshalls.h"
#import "core/string/ustring.h"
#import "core/templates/hash_map.h"

#import <Metal/MTLTexture.h>
#import <Metal/Metal.h>
#import <os/log.h>
#import <os/signpost.h>
#import <spirv.hpp>
#import <spirv_msl.hpp>
#import <spirv_parser.hpp>

#pragma mark - Logging

os_log_t LOG_DRIVER;
// Used for dynamic tracing.
os_log_t LOG_INTERVALS;

__attribute__((constructor)) static void InitializeLogging(void) {
	LOG_DRIVER = os_log_create("org.godotengine.godot.metal", OS_LOG_CATEGORY_POINTS_OF_INTEREST);
	LOG_INTERVALS = os_log_create("org.godotengine.godot.metal", "events");
}
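
// LOG_DRIVER uses OS_LOG_CATEGORY_POINTS_OF_INTEREST, so its events appear in the
// Points of Interest track in Instruments; LOG_INTERVALS is presumably paired with the
// os_signpost interval macros (os_signpost_interval_begin / os_signpost_interval_end)
// imported above when dynamic tracing is enabled.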

/*****************/
/**** GENERIC ****/
/*****************/

// RDD::CompareOperator == VkCompareOp.
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_NEVER, MTLCompareFunctionNever));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_LESS, MTLCompareFunctionLess));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_EQUAL, MTLCompareFunctionEqual));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_LESS_OR_EQUAL, MTLCompareFunctionLessEqual));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_GREATER, MTLCompareFunctionGreater));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_NOT_EQUAL, MTLCompareFunctionNotEqual));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_GREATER_OR_EQUAL, MTLCompareFunctionGreaterEqual));
static_assert(ENUM_MEMBERS_EQUAL(RDD::COMPARE_OP_ALWAYS, MTLCompareFunctionAlways));

_FORCE_INLINE_ MTLSize mipmapLevelSizeFromTexture(id<MTLTexture> p_tex, NSUInteger p_level) {
	MTLSize lvlSize;
	lvlSize.width = MAX(p_tex.width >> p_level, 1UL);
	lvlSize.height = MAX(p_tex.height >> p_level, 1UL);
	lvlSize.depth = MAX(p_tex.depth >> p_level, 1UL);
	return lvlSize;
}

_FORCE_INLINE_ MTLSize mipmapLevelSizeFromSize(MTLSize p_size, NSUInteger p_level) {
	if (p_level == 0) {
		return p_size;
	}
	MTLSize lvlSize;
	lvlSize.width = MAX(p_size.width >> p_level, 1UL);
	lvlSize.height = MAX(p_size.height >> p_level, 1UL);
	lvlSize.depth = MAX(p_size.depth >> p_level, 1UL);
	return lvlSize;
}
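
// For example, mipmapLevelSizeFromSize({ 1024, 512, 1 }, 3) yields { 128, 64, 1 }: each
// dimension is shifted right by the level and clamped to a minimum of 1.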

_FORCE_INLINE_ static bool operator==(MTLSize p_a, MTLSize p_b) {
	return p_a.width == p_b.width && p_a.height == p_b.height && p_a.depth == p_b.depth;
}

/*****************/
/**** BUFFERS ****/
/*****************/

RDD::BufferID RenderingDeviceDriverMetal::buffer_create(uint64_t p_size, BitField<BufferUsageBits> p_usage, MemoryAllocationType p_allocation_type) {
	MTLResourceOptions options = MTLResourceHazardTrackingModeTracked;
	switch (p_allocation_type) {
		case MEMORY_ALLOCATION_TYPE_CPU:
			options |= MTLResourceStorageModeShared;
			break;
		case MEMORY_ALLOCATION_TYPE_GPU:
			options |= MTLResourceStorageModePrivate;
			break;
	}

	id<MTLBuffer> obj = [device newBufferWithLength:p_size options:options];
	ERR_FAIL_NULL_V_MSG(obj, BufferID(), "Can't create buffer of size: " + itos(p_size));
	return rid::make(obj);
}

bool RenderingDeviceDriverMetal::buffer_set_texel_format(BufferID p_buffer, DataFormat p_format) {
	// Nothing to do.
	return true;
}

void RenderingDeviceDriverMetal::buffer_free(BufferID p_buffer) {
	rid::release(p_buffer);
}

uint64_t RenderingDeviceDriverMetal::buffer_get_allocation_size(BufferID p_buffer) {
	id<MTLBuffer> obj = rid::get(p_buffer);
	return obj.allocatedSize;
}

uint8_t *RenderingDeviceDriverMetal::buffer_map(BufferID p_buffer) {
	id<MTLBuffer> obj = rid::get(p_buffer);
	ERR_FAIL_COND_V_MSG(obj.storageMode != MTLStorageModeShared, nullptr, "Unable to map private buffers");
	return (uint8_t *)obj.contents;
}

void RenderingDeviceDriverMetal::buffer_unmap(BufferID p_buffer) {
	// Nothing to do.
}

uint64_t RenderingDeviceDriverMetal::buffer_get_device_address(BufferID p_buffer) {
	if (@available(iOS 16.0, macOS 13.0, *)) {
		id<MTLBuffer> obj = rid::get(p_buffer);
		return obj.gpuAddress;
	} else {
#if DEV_ENABLED
		WARN_PRINT_ONCE("buffer_get_device_address is not supported on this OS version.");
#endif
		return 0;
	}
}
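
// Illustrative usage of the buffer API above (sizes and usage bits are hypothetical): a
// CPU-visible staging buffer is created with MEMORY_ALLOCATION_TYPE_CPU, which selects
// MTLResourceStorageModeShared, so buffer_map() can hand back the MTLBuffer contents
// pointer directly:
//
//   BufferID staging = buffer_create(4096, BitField<BufferUsageBits>(), MEMORY_ALLOCATION_TYPE_CPU);
//   uint8_t *ptr = buffer_map(staging); // Fails for MTLStorageModePrivate buffers.
//   // ... fill ptr ...
//   buffer_unmap(staging);
//   buffer_free(staging);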

#pragma mark - Texture

#pragma mark - Format Conversions

static const MTLTextureType TEXTURE_TYPE[RD::TEXTURE_TYPE_MAX] = {
	MTLTextureType1D,
	MTLTextureType2D,
	MTLTextureType3D,
	MTLTextureTypeCube,
	MTLTextureType1DArray,
	MTLTextureType2DArray,
	MTLTextureTypeCubeArray,
};

RenderingDeviceDriverMetal::Result<bool> RenderingDeviceDriverMetal::is_valid_linear(TextureFormat const &p_format) const {
	if (!flags::any(p_format.usage_bits, TEXTURE_USAGE_CPU_READ_BIT)) {
		return false;
	}

	PixelFormats &pf = *pixel_formats;
	MTLFormatType ft = pf.getFormatType(p_format.format);

	// Requesting a linear format, which has further restrictions, similar to Vulkan
	// when specifying VK_IMAGE_TILING_LINEAR.

	ERR_FAIL_COND_V_MSG(p_format.texture_type != TEXTURE_TYPE_2D, ERR_CANT_CREATE, "Linear (TEXTURE_USAGE_CPU_READ_BIT) textures must be 2D");
	ERR_FAIL_COND_V_MSG(ft == MTLFormatType::DepthStencil, ERR_CANT_CREATE, "Linear (TEXTURE_USAGE_CPU_READ_BIT) textures must not be a depth/stencil format");
	ERR_FAIL_COND_V_MSG(ft == MTLFormatType::Compressed, ERR_CANT_CREATE, "Linear (TEXTURE_USAGE_CPU_READ_BIT) textures must not be a compressed format");
	ERR_FAIL_COND_V_MSG(p_format.mipmaps != 1, ERR_CANT_CREATE, "Linear (TEXTURE_USAGE_CPU_READ_BIT) textures must have 1 mipmap level");
	ERR_FAIL_COND_V_MSG(p_format.array_layers != 1, ERR_CANT_CREATE, "Linear (TEXTURE_USAGE_CPU_READ_BIT) textures must have 1 array layer");
	ERR_FAIL_COND_V_MSG(p_format.samples != TEXTURE_SAMPLES_1, ERR_CANT_CREATE, "Linear (TEXTURE_USAGE_CPU_READ_BIT) textures must have 1 sample");

	return true;
}

RDD::TextureID RenderingDeviceDriverMetal::texture_create(const TextureFormat &p_format, const TextureView &p_view) {
	MTLTextureDescriptor *desc = [MTLTextureDescriptor new];
	desc.textureType = TEXTURE_TYPE[p_format.texture_type];

	PixelFormats &formats = *pixel_formats;
	desc.pixelFormat = formats.getMTLPixelFormat(p_format.format);
	MTLFmtCaps format_caps = formats.getCapabilities(desc.pixelFormat);

	desc.width = p_format.width;
	desc.height = p_format.height;
	desc.depth = p_format.depth;
	desc.mipmapLevelCount = p_format.mipmaps;

	if (p_format.texture_type == TEXTURE_TYPE_1D_ARRAY ||
			p_format.texture_type == TEXTURE_TYPE_2D_ARRAY) {
		desc.arrayLength = p_format.array_layers;
	} else if (p_format.texture_type == TEXTURE_TYPE_CUBE_ARRAY) {
		desc.arrayLength = p_format.array_layers / 6;
	}

	// TODO(sgc): Evaluate lossy texture support (perhaps as a project option?)
	//  https://developer.apple.com/videos/play/tech-talks/10876?time=459
	// desc.compressionType = MTLTextureCompressionTypeLossy;

	if (p_format.samples > TEXTURE_SAMPLES_1) {
		SampleCount supported = (*device_properties).find_nearest_supported_sample_count(p_format.samples);

		if (supported > SampleCount1) {
			bool ok = p_format.texture_type == TEXTURE_TYPE_2D || p_format.texture_type == TEXTURE_TYPE_2D_ARRAY;
			if (ok) {
				switch (p_format.texture_type) {
					case TEXTURE_TYPE_2D:
						desc.textureType = MTLTextureType2DMultisample;
						break;
					case TEXTURE_TYPE_2D_ARRAY:
						desc.textureType = MTLTextureType2DMultisampleArray;
						break;
					default:
						break;
				}
				desc.sampleCount = (NSUInteger)supported;
				if (p_format.mipmaps > 1) {
					// For a buffer-backed or multi-sample texture, the value must be 1.
					WARN_PRINT("mipmaps == 1 for multi-sample textures");
					desc.mipmapLevelCount = 1;
				}
			} else {
				WARN_PRINT("Unsupported multi-sample texture type; disabling multi-sample");
			}
		}
	}

	static const MTLTextureSwizzle COMPONENT_SWIZZLE[TEXTURE_SWIZZLE_MAX] = {
		static_cast<MTLTextureSwizzle>(255), // IDENTITY
		MTLTextureSwizzleZero,
		MTLTextureSwizzleOne,
		MTLTextureSwizzleRed,
		MTLTextureSwizzleGreen,
		MTLTextureSwizzleBlue,
		MTLTextureSwizzleAlpha,
	};

	MTLTextureSwizzleChannels swizzle = MTLTextureSwizzleChannelsMake(
			p_view.swizzle_r != TEXTURE_SWIZZLE_IDENTITY ? COMPONENT_SWIZZLE[p_view.swizzle_r] : MTLTextureSwizzleRed,
			p_view.swizzle_g != TEXTURE_SWIZZLE_IDENTITY ? COMPONENT_SWIZZLE[p_view.swizzle_g] : MTLTextureSwizzleGreen,
			p_view.swizzle_b != TEXTURE_SWIZZLE_IDENTITY ? COMPONENT_SWIZZLE[p_view.swizzle_b] : MTLTextureSwizzleBlue,
			p_view.swizzle_a != TEXTURE_SWIZZLE_IDENTITY ? COMPONENT_SWIZZLE[p_view.swizzle_a] : MTLTextureSwizzleAlpha);

	// Represents a swizzle operation that is a no-op.
	static MTLTextureSwizzleChannels IDENTITY_SWIZZLE = {
		.red = MTLTextureSwizzleRed,
		.green = MTLTextureSwizzleGreen,
		.blue = MTLTextureSwizzleBlue,
		.alpha = MTLTextureSwizzleAlpha,
	};

	bool no_swizzle = memcmp(&IDENTITY_SWIZZLE, &swizzle, sizeof(MTLTextureSwizzleChannels)) == 0;
	if (!no_swizzle) {
		desc.swizzle = swizzle;
	}

	// Usage.

	MTLResourceOptions options = 0;
	const bool supports_memoryless = (*device_properties).features.highestFamily >= MTLGPUFamilyApple2 && (*device_properties).features.highestFamily < MTLGPUFamilyMac1;
	if (supports_memoryless && p_format.usage_bits & TEXTURE_USAGE_TRANSIENT_BIT) {
		options = MTLResourceStorageModeMemoryless | MTLResourceHazardTrackingModeTracked;
		desc.storageMode = MTLStorageModeMemoryless;
	} else {
		options = MTLResourceCPUCacheModeDefaultCache | MTLResourceHazardTrackingModeTracked;
		if (p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT) {
			options |= MTLResourceStorageModeShared;
		} else {
			options |= MTLResourceStorageModePrivate;
		}
	}
	desc.resourceOptions = options;

	if (p_format.usage_bits & TEXTURE_USAGE_SAMPLING_BIT) {
		desc.usage |= MTLTextureUsageShaderRead;
	}

	if (p_format.usage_bits & TEXTURE_USAGE_STORAGE_BIT) {
		desc.usage |= MTLTextureUsageShaderWrite;
	}

	if (@available(macOS 14.0, iOS 17.0, tvOS 17.0, *)) {
		if (format_caps & kMTLFmtCapsAtomic) {
			desc.usage |= MTLTextureUsageShaderAtomic;
		}
	}

	bool can_be_attachment = flags::any(format_caps, (kMTLFmtCapsColorAtt | kMTLFmtCapsDSAtt));

	if (flags::any(p_format.usage_bits, TEXTURE_USAGE_COLOR_ATTACHMENT_BIT | TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
			can_be_attachment) {
		desc.usage |= MTLTextureUsageRenderTarget;
	}

	if (p_format.usage_bits & TEXTURE_USAGE_INPUT_ATTACHMENT_BIT) {
		desc.usage |= MTLTextureUsageShaderRead;
	}

	if (p_format.usage_bits & TEXTURE_USAGE_VRS_ATTACHMENT_BIT) {
		ERR_FAIL_V_MSG(RDD::TextureID(), "unsupported: TEXTURE_USAGE_VRS_ATTACHMENT_BIT");
	}

	if (flags::any(p_format.usage_bits, TEXTURE_USAGE_CAN_UPDATE_BIT | TEXTURE_USAGE_CAN_COPY_TO_BIT) &&
			can_be_attachment && no_swizzle) {
		// Per MoltenVK, can be cleared as a render attachment.
		desc.usage |= MTLTextureUsageRenderTarget;
	}
	if (p_format.usage_bits & TEXTURE_USAGE_CAN_COPY_FROM_BIT) {
		// Covered by blits.
	}

	// Create texture views with a different component layout.
	if (!p_format.shareable_formats.is_empty()) {
		desc.usage |= MTLTextureUsagePixelFormatView;
	}

	// Allocate memory.

	bool is_linear;
	{
		Result<bool> is_linear_or_err = is_valid_linear(p_format);
		ERR_FAIL_COND_V(std::holds_alternative<Error>(is_linear_or_err), TextureID());
		is_linear = std::get<bool>(is_linear_or_err);
	}

	// Check if it is a linear format for atomic operations and therefore needs a buffer,
	// as generally Metal does not support atomic operations on textures.
	bool needs_buffer = is_linear || (p_format.array_layers == 1 && p_format.mipmaps == 1 && p_format.texture_type == TEXTURE_TYPE_2D && flags::any(p_format.usage_bits, TEXTURE_USAGE_STORAGE_BIT) && (p_format.format == DATA_FORMAT_R32_UINT || p_format.format == DATA_FORMAT_R32_SINT || p_format.format == DATA_FORMAT_R32G32_UINT || p_format.format == DATA_FORMAT_R32G32_SINT));

	id<MTLTexture> obj = nil;
	if (needs_buffer) {
		// Linear textures are restricted to 2D textures, a single mipmap level and a single array layer.
		MTLPixelFormat pixel_format = desc.pixelFormat;
		size_t row_alignment = get_texel_buffer_alignment_for_format(p_format.format);
		size_t bytes_per_row = formats.getBytesPerRow(pixel_format, p_format.width);
		bytes_per_row = round_up_to_alignment(bytes_per_row, row_alignment);
		size_t bytes_per_layer = formats.getBytesPerLayer(pixel_format, bytes_per_row, p_format.height);
		size_t byte_count = bytes_per_layer * p_format.depth * p_format.array_layers;

		id<MTLBuffer> buf = [device newBufferWithLength:byte_count options:options];
		obj = [buf newTextureWithDescriptor:desc offset:0 bytesPerRow:bytes_per_row];
	} else {
		obj = [device newTextureWithDescriptor:desc];
	}
	ERR_FAIL_NULL_V_MSG(obj, TextureID(), "Unable to create texture.");

	return rid::make(obj);
}
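
// Illustrative arithmetic for the buffer-backed (linear) path above, with hypothetical
// values: a 250x128 DATA_FORMAT_R32_UINT storage texture has 250 * 4 = 1000 bytes per
// row; with a 256-byte texel buffer row alignment this rounds up to 1024, giving
// 1024 * 128 = 131072 bytes per layer and, at depth 1 with a single array layer, a
// 131072-byte backing MTLBuffer.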

RDD::TextureID RenderingDeviceDriverMetal::texture_create_from_extension(uint64_t p_native_texture, TextureType p_type, DataFormat p_format, uint32_t p_array_layers, bool p_depth_stencil) {
	id<MTLTexture> res = (__bridge id<MTLTexture>)(void *)(uintptr_t)p_native_texture;

	// If the requested format is different, we need to create a view.
	MTLPixelFormat format = pixel_formats->getMTLPixelFormat(p_format);
	if (res.pixelFormat != format) {
		MTLTextureSwizzleChannels swizzle = MTLTextureSwizzleChannelsMake(
				MTLTextureSwizzleRed,
				MTLTextureSwizzleGreen,
				MTLTextureSwizzleBlue,
				MTLTextureSwizzleAlpha);
		res = [res newTextureViewWithPixelFormat:format
									 textureType:res.textureType
										  levels:NSMakeRange(0, res.mipmapLevelCount)
										  slices:NSMakeRange(0, p_array_layers)
										 swizzle:swizzle];
		ERR_FAIL_NULL_V_MSG(res, TextureID(), "Unable to create texture view.");
	}

	return rid::make(res);
}

RDD::TextureID RenderingDeviceDriverMetal::texture_create_shared(TextureID p_original_texture, const TextureView &p_view) {
	id<MTLTexture> src_texture = rid::get(p_original_texture);

	NSUInteger slices = src_texture.arrayLength;
	if (src_texture.textureType == MTLTextureTypeCube) {
		// Metal expects Cube textures to have a slice count of 6.
		slices = 6;
	} else if (src_texture.textureType == MTLTextureTypeCubeArray) {
		// Metal expects Cube Array textures to have 6 slices per layer.
		slices *= 6;
	}

#if DEV_ENABLED
	if (src_texture.sampleCount > 1) {
		// TODO(sgc): is it ok to create a shared texture from a multi-sample texture?
		WARN_PRINT("Is it safe to create a shared texture from multi-sample texture?");
	}
#endif

	MTLPixelFormat format = pixel_formats->getMTLPixelFormat(p_view.format);

	static const MTLTextureSwizzle component_swizzle[TEXTURE_SWIZZLE_MAX] = {
		static_cast<MTLTextureSwizzle>(255), // IDENTITY
		MTLTextureSwizzleZero,
		MTLTextureSwizzleOne,
		MTLTextureSwizzleRed,
		MTLTextureSwizzleGreen,
		MTLTextureSwizzleBlue,
		MTLTextureSwizzleAlpha,
	};

#define SWIZZLE(C, CHAN) (p_view.swizzle_##C != TEXTURE_SWIZZLE_IDENTITY ? component_swizzle[p_view.swizzle_##C] : MTLTextureSwizzle##CHAN)
	MTLTextureSwizzleChannels swizzle = MTLTextureSwizzleChannelsMake(
			SWIZZLE(r, Red),
			SWIZZLE(g, Green),
			SWIZZLE(b, Blue),
			SWIZZLE(a, Alpha));
#undef SWIZZLE

	id<MTLTexture> obj = [src_texture newTextureViewWithPixelFormat:format
														textureType:src_texture.textureType
															 levels:NSMakeRange(0, src_texture.mipmapLevelCount)
															 slices:NSMakeRange(0, slices)
															swizzle:swizzle];
	ERR_FAIL_NULL_V_MSG(obj, TextureID(), "Unable to create shared texture");
	return rid::make(obj);
}

RDD::TextureID RenderingDeviceDriverMetal::texture_create_shared_from_slice(TextureID p_original_texture, const TextureView &p_view, TextureSliceType p_slice_type, uint32_t p_layer, uint32_t p_layers, uint32_t p_mipmap, uint32_t p_mipmaps) {
	id<MTLTexture> src_texture = rid::get(p_original_texture);

	static const MTLTextureType VIEW_TYPES[] = {
		MTLTextureType1D, // MTLTextureType1D
		MTLTextureType1D, // MTLTextureType1DArray
		MTLTextureType2D, // MTLTextureType2D
		MTLTextureType2D, // MTLTextureType2DArray
		MTLTextureType2D, // MTLTextureType2DMultisample
		MTLTextureType2D, // MTLTextureTypeCube
		MTLTextureType2D, // MTLTextureTypeCubeArray
		MTLTextureType2D, // MTLTextureType3D
		MTLTextureType2D, // MTLTextureType2DMultisampleArray
	};

	MTLTextureType textureType = VIEW_TYPES[src_texture.textureType];
	switch (p_slice_type) {
		case TEXTURE_SLICE_2D: {
			textureType = MTLTextureType2D;
		} break;
		case TEXTURE_SLICE_3D: {
			textureType = MTLTextureType3D;
		} break;
		case TEXTURE_SLICE_CUBEMAP: {
			textureType = MTLTextureTypeCube;
		} break;
		case TEXTURE_SLICE_2D_ARRAY: {
			textureType = MTLTextureType2DArray;
		} break;
		case TEXTURE_SLICE_MAX: {
			ERR_FAIL_V_MSG(TextureID(), "Invalid texture slice type");
		} break;
	}

	MTLPixelFormat format = pixel_formats->getMTLPixelFormat(p_view.format);

	static const MTLTextureSwizzle component_swizzle[TEXTURE_SWIZZLE_MAX] = {
		static_cast<MTLTextureSwizzle>(255), // IDENTITY
		MTLTextureSwizzleZero,
		MTLTextureSwizzleOne,
		MTLTextureSwizzleRed,
		MTLTextureSwizzleGreen,
		MTLTextureSwizzleBlue,
		MTLTextureSwizzleAlpha,
	};

#define SWIZZLE(C, CHAN) (p_view.swizzle_##C != TEXTURE_SWIZZLE_IDENTITY ? component_swizzle[p_view.swizzle_##C] : MTLTextureSwizzle##CHAN)
	MTLTextureSwizzleChannels swizzle = MTLTextureSwizzleChannelsMake(
			SWIZZLE(r, Red),
			SWIZZLE(g, Green),
			SWIZZLE(b, Blue),
			SWIZZLE(a, Alpha));
#undef SWIZZLE

	id<MTLTexture> obj = [src_texture newTextureViewWithPixelFormat:format
														textureType:textureType
															 levels:NSMakeRange(p_mipmap, p_mipmaps)
															 slices:NSMakeRange(p_layer, p_layers)
															swizzle:swizzle];
	ERR_FAIL_NULL_V_MSG(obj, TextureID(), "Unable to create shared texture");
	return rid::make(obj);
}

void RenderingDeviceDriverMetal::texture_free(TextureID p_texture) {
	rid::release(p_texture);
}

uint64_t RenderingDeviceDriverMetal::texture_get_allocation_size(TextureID p_texture) {
	id<MTLTexture> obj = rid::get(p_texture);
	return obj.allocatedSize;
}

void RenderingDeviceDriverMetal::_get_sub_resource(TextureID p_texture, const TextureSubresource &p_subresource, TextureCopyableLayout *r_layout) const {
	id<MTLTexture> obj = rid::get(p_texture);

	*r_layout = {};

	PixelFormats &pf = *pixel_formats;

	size_t row_alignment = get_texel_buffer_alignment_for_format(obj.pixelFormat);
	size_t offset = 0;
	size_t array_layers = obj.arrayLength;
	MTLSize size = MTLSizeMake(obj.width, obj.height, obj.depth);
	MTLPixelFormat pixel_format = obj.pixelFormat;

	// First skip over the mipmap levels.
	for (uint32_t mipLvl = 0; mipLvl < p_subresource.mipmap; mipLvl++) {
		MTLSize mip_size = mipmapLevelSizeFromSize(size, mipLvl);
		size_t bytes_per_row = pf.getBytesPerRow(pixel_format, mip_size.width);
		bytes_per_row = round_up_to_alignment(bytes_per_row, row_alignment);
		size_t bytes_per_layer = pf.getBytesPerLayer(pixel_format, bytes_per_row, mip_size.height);
		offset += bytes_per_layer * mip_size.depth * array_layers;
	}

	// Get current mipmap.
	MTLSize mip_size = mipmapLevelSizeFromSize(size, p_subresource.mipmap);
	size_t bytes_per_row = pf.getBytesPerRow(pixel_format, mip_size.width);
	bytes_per_row = round_up_to_alignment(bytes_per_row, row_alignment);
	size_t bytes_per_layer = pf.getBytesPerLayer(pixel_format, bytes_per_row, mip_size.height);
	r_layout->size = bytes_per_layer * mip_size.depth;
	r_layout->offset = offset + (r_layout->size * p_subresource.layer - 1);
	r_layout->depth_pitch = bytes_per_layer;
	r_layout->row_pitch = bytes_per_row;
	r_layout->layer_pitch = r_layout->size * array_layers;
}
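
// Layout assumed by _get_sub_resource above: subresources are stored mip-major, with all
// array layers of one mip level packed before the next level begins. Hypothetical example:
// a 64x64 RGBA8 texture with 2 array layers and a 256-byte row alignment uses
// 64 * 256 = 16384 bytes per layer, so mip 0 occupies 32768 bytes and mip 1 begins at
// offset 32768.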

void RenderingDeviceDriverMetal::texture_get_copyable_layout(TextureID p_texture, const TextureSubresource &p_subresource, TextureCopyableLayout *r_layout) {
	id<MTLTexture> obj = rid::get(p_texture);
	*r_layout = {};

	if ((obj.resourceOptions & MTLResourceStorageModePrivate) != 0) {
		MTLSize sz = MTLSizeMake(obj.width, obj.height, obj.depth);

		PixelFormats &pf = *pixel_formats;
		DataFormat format = pf.getDataFormat(obj.pixelFormat);
		if (p_subresource.mipmap > 0) {
			r_layout->offset = get_image_format_required_size(format, sz.width, sz.height, sz.depth, p_subresource.mipmap);
		}

		sz = mipmapLevelSizeFromSize(sz, p_subresource.mipmap);

		uint32_t bw = 0, bh = 0;
		get_compressed_image_format_block_dimensions(format, bw, bh);
		uint32_t sbw = 0, sbh = 0;
		r_layout->size = get_image_format_required_size(format, sz.width, sz.height, sz.depth, 1, &sbw, &sbh);
		r_layout->row_pitch = r_layout->size / ((sbh / bh) * sz.depth);
		r_layout->depth_pitch = r_layout->size / sz.depth;
		uint32_t array_length = obj.arrayLength;
		if (obj.textureType == MTLTextureTypeCube) {
			array_length = 6;
		} else if (obj.textureType == MTLTextureTypeCubeArray) {
			array_length *= 6;
		}
		r_layout->layer_pitch = r_layout->size / array_length;
	} else {
		CRASH_NOW_MSG("need to calculate layout for shared texture");
	}
}

uint8_t *RenderingDeviceDriverMetal::texture_map(TextureID p_texture, const TextureSubresource &p_subresource) {
	id<MTLTexture> obj = rid::get(p_texture);
	ERR_FAIL_NULL_V_MSG(obj.buffer, nullptr, "texture is not created from a buffer");

	TextureCopyableLayout layout;
	_get_sub_resource(p_texture, p_subresource, &layout);
	return (uint8_t *)(obj.buffer.contents) + layout.offset;

	// NOTE: The code below is unreachable because of the early return above; it is kept
	// for reference until the offset calculation is confirmed (see the TODO below).
	PixelFormats &pf = *pixel_formats;

	size_t row_alignment = get_texel_buffer_alignment_for_format(obj.pixelFormat);
	size_t offset = 0;
	size_t array_layers = obj.arrayLength;
	MTLSize size = MTLSizeMake(obj.width, obj.height, obj.depth);
	MTLPixelFormat pixel_format = obj.pixelFormat;

	// First skip over the mipmap levels.
	for (uint32_t mipLvl = 0; mipLvl < p_subresource.mipmap; mipLvl++) {
		MTLSize mipExtent = mipmapLevelSizeFromSize(size, mipLvl);
		size_t bytes_per_row = pf.getBytesPerRow(pixel_format, mipExtent.width);
		bytes_per_row = round_up_to_alignment(bytes_per_row, row_alignment);
		size_t bytes_per_layer = pf.getBytesPerLayer(pixel_format, bytes_per_row, mipExtent.height);
		offset += bytes_per_layer * mipExtent.depth * array_layers;
	}

	if (p_subresource.layer > 1) {
		// Calculate offset to desired layer.
		MTLSize mipExtent = mipmapLevelSizeFromSize(size, p_subresource.mipmap);
		size_t bytes_per_row = pf.getBytesPerRow(pixel_format, mipExtent.width);
		bytes_per_row = round_up_to_alignment(bytes_per_row, row_alignment);
		size_t bytes_per_layer = pf.getBytesPerLayer(pixel_format, bytes_per_row, mipExtent.height);
		offset += bytes_per_layer * mipExtent.depth * (p_subresource.layer - 1);
	}

	// TODO: Confirm with rendering team that there is no other way Godot may attempt to map a texture with multiple mipmaps or array layers.

	// NOTE: It is not possible to create a buffer-backed texture with mipmaps or array layers,
	//  as noted in the is_valid_linear function, so the offset calculation SHOULD always be zero.
	//  Given that, this code should be simplified.

	return (uint8_t *)(obj.buffer.contents) + offset;
}

void RenderingDeviceDriverMetal::texture_unmap(TextureID p_texture) {
	// Nothing to do.
}

BitField<RDD::TextureUsageBits> RenderingDeviceDriverMetal::texture_get_usages_supported_by_format(DataFormat p_format, bool p_cpu_readable) {
	PixelFormats &pf = *pixel_formats;
	if (pf.getMTLPixelFormat(p_format) == MTLPixelFormatInvalid) {
		return 0;
	}

	MTLFmtCaps caps = pf.getCapabilities(p_format);

	// Everything supported by default makes an all-or-nothing check easier for the caller.
	BitField<RDD::TextureUsageBits> supported = INT64_MAX;
	supported.clear_flag(TEXTURE_USAGE_VRS_ATTACHMENT_BIT); // No VRS support for Metal.

	if (!flags::any(caps, kMTLFmtCapsColorAtt)) {
		supported.clear_flag(TEXTURE_USAGE_COLOR_ATTACHMENT_BIT);
	}

	if (!flags::any(caps, kMTLFmtCapsDSAtt)) {
		supported.clear_flag(TEXTURE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
	}

	if (!flags::any(caps, kMTLFmtCapsRead)) {
		supported.clear_flag(TEXTURE_USAGE_SAMPLING_BIT);
	}

	if (!flags::any(caps, kMTLFmtCapsAtomic)) {
		supported.clear_flag(TEXTURE_USAGE_STORAGE_ATOMIC_BIT);
	}

	return supported;
}

bool RenderingDeviceDriverMetal::texture_can_make_shared_with_format(TextureID p_texture, DataFormat p_format, bool &r_raw_reinterpretation) {
	r_raw_reinterpretation = false;
	return true;
}

#pragma mark - Sampler

static const MTLCompareFunction COMPARE_OPERATORS[RD::COMPARE_OP_MAX] = {
	MTLCompareFunctionNever,
	MTLCompareFunctionLess,
	MTLCompareFunctionEqual,
	MTLCompareFunctionLessEqual,
	MTLCompareFunctionGreater,
	MTLCompareFunctionNotEqual,
	MTLCompareFunctionGreaterEqual,
	MTLCompareFunctionAlways,
};

static const MTLStencilOperation STENCIL_OPERATIONS[RD::STENCIL_OP_MAX] = {
	MTLStencilOperationKeep,
	MTLStencilOperationZero,
	MTLStencilOperationReplace,
	MTLStencilOperationIncrementClamp,
	MTLStencilOperationDecrementClamp,
	MTLStencilOperationInvert,
	MTLStencilOperationIncrementWrap,
	MTLStencilOperationDecrementWrap,
};

static const MTLBlendFactor BLEND_FACTORS[RD::BLEND_FACTOR_MAX] = {
	MTLBlendFactorZero,
	MTLBlendFactorOne,
	MTLBlendFactorSourceColor,
	MTLBlendFactorOneMinusSourceColor,
	MTLBlendFactorDestinationColor,
	MTLBlendFactorOneMinusDestinationColor,
	MTLBlendFactorSourceAlpha,
	MTLBlendFactorOneMinusSourceAlpha,
	MTLBlendFactorDestinationAlpha,
	MTLBlendFactorOneMinusDestinationAlpha,
	MTLBlendFactorBlendColor,
	MTLBlendFactorOneMinusBlendColor,
	MTLBlendFactorBlendAlpha,
	MTLBlendFactorOneMinusBlendAlpha,
	MTLBlendFactorSourceAlphaSaturated,
	MTLBlendFactorSource1Color,
	MTLBlendFactorOneMinusSource1Color,
	MTLBlendFactorSource1Alpha,
	MTLBlendFactorOneMinusSource1Alpha,
};

static const MTLBlendOperation BLEND_OPERATIONS[RD::BLEND_OP_MAX] = {
	MTLBlendOperationAdd,
	MTLBlendOperationSubtract,
	MTLBlendOperationReverseSubtract,
	MTLBlendOperationMin,
	MTLBlendOperationMax,
};

static const API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) MTLSamplerAddressMode ADDRESS_MODES[RD::SAMPLER_REPEAT_MODE_MAX] = {
	MTLSamplerAddressModeRepeat,
	MTLSamplerAddressModeMirrorRepeat,
	MTLSamplerAddressModeClampToEdge,
	MTLSamplerAddressModeClampToBorderColor,
	MTLSamplerAddressModeMirrorClampToEdge,
};

static const API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) MTLSamplerBorderColor SAMPLER_BORDER_COLORS[RD::SAMPLER_BORDER_COLOR_MAX] = {
	MTLSamplerBorderColorTransparentBlack,
	MTLSamplerBorderColorTransparentBlack,
	MTLSamplerBorderColorOpaqueBlack,
	MTLSamplerBorderColorOpaqueBlack,
	MTLSamplerBorderColorOpaqueWhite,
	MTLSamplerBorderColorOpaqueWhite,
};

RDD::SamplerID RenderingDeviceDriverMetal::sampler_create(const SamplerState &p_state) {
	MTLSamplerDescriptor *desc = [MTLSamplerDescriptor new];
	desc.supportArgumentBuffers = YES;

	desc.magFilter = p_state.mag_filter == SAMPLER_FILTER_LINEAR ? MTLSamplerMinMagFilterLinear : MTLSamplerMinMagFilterNearest;
	desc.minFilter = p_state.min_filter == SAMPLER_FILTER_LINEAR ? MTLSamplerMinMagFilterLinear : MTLSamplerMinMagFilterNearest;
	desc.mipFilter = p_state.mip_filter == SAMPLER_FILTER_LINEAR ? MTLSamplerMipFilterLinear : MTLSamplerMipFilterNearest;

	desc.sAddressMode = ADDRESS_MODES[p_state.repeat_u];
	desc.tAddressMode = ADDRESS_MODES[p_state.repeat_v];
	desc.rAddressMode = ADDRESS_MODES[p_state.repeat_w];

	if (p_state.use_anisotropy) {
		desc.maxAnisotropy = p_state.anisotropy_max;
	}

	desc.compareFunction = COMPARE_OPERATORS[p_state.compare_op];
	desc.lodMinClamp = p_state.min_lod;
	desc.lodMaxClamp = p_state.max_lod;
	desc.borderColor = SAMPLER_BORDER_COLORS[p_state.border_color];
	desc.normalizedCoordinates = !p_state.unnormalized_uvw;

	if (p_state.lod_bias != 0.0) {
		WARN_PRINT_ONCE("Metal does not support LOD bias for samplers.");
	}

	id<MTLSamplerState> obj = [device newSamplerStateWithDescriptor:desc];
	ERR_FAIL_NULL_V_MSG(obj, SamplerID(), "newSamplerStateWithDescriptor failed");
	return rid::make(obj);
}

void RenderingDeviceDriverMetal::sampler_free(SamplerID p_sampler) {
	rid::release(p_sampler);
}

bool RenderingDeviceDriverMetal::sampler_is_format_supported_for_filter(DataFormat p_format, SamplerFilter p_filter) {
	switch (p_filter) {
		case SAMPLER_FILTER_NEAREST:
			return true;
		case SAMPLER_FILTER_LINEAR: {
			MTLFmtCaps caps = pixel_formats->getCapabilities(p_format);
			return flags::any(caps, kMTLFmtCapsFilter);
		}
	}
}
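
// Illustrative mapping for sampler_create above (the state values are hypothetical): a
// trilinear repeating sampler sets mag_filter/min_filter/mip_filter to
// SAMPLER_FILTER_LINEAR and repeat_u/v/w to SAMPLER_REPEAT_MODE_REPEAT, which translates
// to MTLSamplerMinMagFilterLinear, MTLSamplerMipFilterLinear and
// MTLSamplerAddressModeRepeat in the MTLSamplerDescriptor.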

#pragma mark - Vertex Array

RDD::VertexFormatID RenderingDeviceDriverMetal::vertex_format_create(VectorView<VertexAttribute> p_vertex_attribs) {
	MTLVertexDescriptor *desc = MTLVertexDescriptor.vertexDescriptor;

	for (uint32_t i = 0; i < p_vertex_attribs.size(); i++) {
		VertexAttribute const &vf = p_vertex_attribs[i];

		ERR_FAIL_COND_V_MSG(get_format_vertex_size(vf.format) == 0, VertexFormatID(),
				"Data format for attachment (" + itos(i) + "), '" + FORMAT_NAMES[vf.format] + "', is not valid for a vertex array.");

		desc.attributes[vf.location].format = pixel_formats->getMTLVertexFormat(vf.format);
		desc.attributes[vf.location].offset = vf.offset;
		uint32_t idx = get_metal_buffer_index_for_vertex_attribute_binding(i);
		desc.attributes[vf.location].bufferIndex = idx;
		if (vf.stride == 0) {
			desc.layouts[idx].stepFunction = MTLVertexStepFunctionConstant;
			desc.layouts[idx].stepRate = 0;
			desc.layouts[idx].stride = pixel_formats->getBytesPerBlock(vf.format);
		} else {
			desc.layouts[idx].stepFunction = vf.frequency == VERTEX_FREQUENCY_VERTEX ? MTLVertexStepFunctionPerVertex : MTLVertexStepFunctionPerInstance;
			desc.layouts[idx].stepRate = 1;
			desc.layouts[idx].stride = vf.stride;
		}
	}

	return rid::make(desc);
}

void RenderingDeviceDriverMetal::vertex_format_free(VertexFormatID p_vertex_format) {
	rid::release(p_vertex_format);
}
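
// Illustrative input for vertex_format_create above (attribute values are hypothetical):
// a position attribute (DATA_FORMAT_R32G32B32_SFLOAT, stride 12) and a UV attribute
// (DATA_FORMAT_R32G32_SFLOAT, stride 8) each get their own Metal buffer index from
// get_metal_buffer_index_for_vertex_attribute_binding(), with per-vertex step layouts;
// an attribute with stride 0 instead uses MTLVertexStepFunctionConstant and stepRate 0.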

#pragma mark - Barriers

void RenderingDeviceDriverMetal::command_pipeline_barrier(
		CommandBufferID p_cmd_buffer,
		BitField<PipelineStageBits> p_src_stages,
		BitField<PipelineStageBits> p_dst_stages,
		VectorView<MemoryBarrier> p_memory_barriers,
		VectorView<BufferBarrier> p_buffer_barriers,
		VectorView<TextureBarrier> p_texture_barriers) {
	WARN_PRINT_ONCE("not implemented");
}

#pragma mark - Fences

RDD::FenceID RenderingDeviceDriverMetal::fence_create() {
	Fence *fence = memnew(Fence);
	return FenceID(fence);
}

Error RenderingDeviceDriverMetal::fence_wait(FenceID p_fence) {
	Fence *fence = (Fence *)(p_fence.id);

	// Wait forever, so this function is infallible.
	dispatch_semaphore_wait(fence->semaphore, DISPATCH_TIME_FOREVER);

	return OK;
}

void RenderingDeviceDriverMetal::fence_free(FenceID p_fence) {
	Fence *fence = (Fence *)(p_fence.id);
	memdelete(fence);
}
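
// Note on the fence implementation above: Fence wraps a dispatch semaphore, so
// fence_wait() blocks the calling thread until command_queue_execute_and_present()
// signals fence->semaphore from the completion handler of the last committed command
// buffer (see below).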

#pragma mark - Semaphores

RDD::SemaphoreID RenderingDeviceDriverMetal::semaphore_create() {
	// Metal doesn't use semaphores, as their purpose within Godot is to ensure ordering of command buffer execution.
	return SemaphoreID(1);
}

void RenderingDeviceDriverMetal::semaphore_free(SemaphoreID p_semaphore) {
}

#pragma mark - Queues

RDD::CommandQueueFamilyID RenderingDeviceDriverMetal::command_queue_family_get(BitField<CommandQueueFamilyBits> p_cmd_queue_family_bits, RenderingContextDriver::SurfaceID p_surface) {
	if (p_cmd_queue_family_bits.has_flag(COMMAND_QUEUE_FAMILY_GRAPHICS_BIT) || (p_surface != 0)) {
		return CommandQueueFamilyID(COMMAND_QUEUE_FAMILY_GRAPHICS_BIT);
	} else if (p_cmd_queue_family_bits.has_flag(COMMAND_QUEUE_FAMILY_COMPUTE_BIT)) {
		return CommandQueueFamilyID(COMMAND_QUEUE_FAMILY_COMPUTE_BIT);
	} else if (p_cmd_queue_family_bits.has_flag(COMMAND_QUEUE_FAMILY_TRANSFER_BIT)) {
		return CommandQueueFamilyID(COMMAND_QUEUE_FAMILY_TRANSFER_BIT);
	} else {
		return CommandQueueFamilyID();
	}
}

RDD::CommandQueueID RenderingDeviceDriverMetal::command_queue_create(CommandQueueFamilyID p_cmd_queue_family, bool p_identify_as_main_queue) {
	return CommandQueueID(1);
}

Error RenderingDeviceDriverMetal::command_queue_execute_and_present(CommandQueueID p_cmd_queue, VectorView<SemaphoreID>, VectorView<CommandBufferID> p_cmd_buffers, VectorView<SemaphoreID>, FenceID p_cmd_fence, VectorView<SwapChainID> p_swap_chains) {
	uint32_t size = p_cmd_buffers.size();
	if (size == 0) {
		return OK;
	}

	for (uint32_t i = 0; i < size - 1; i++) {
		MDCommandBuffer *cmd_buffer = (MDCommandBuffer *)(p_cmd_buffers[i].id);
		cmd_buffer->commit();
	}

	// The last command buffer will signal the fence and semaphores.
	MDCommandBuffer *cmd_buffer = (MDCommandBuffer *)(p_cmd_buffers[size - 1].id);
	Fence *fence = (Fence *)(p_cmd_fence.id);
	if (fence != nullptr) {
		[cmd_buffer->get_command_buffer() addCompletedHandler:^(id<MTLCommandBuffer> buffer) {
			dispatch_semaphore_signal(fence->semaphore);
		}];
	}

	for (uint32_t i = 0; i < p_swap_chains.size(); i++) {
		SwapChain *swap_chain = (SwapChain *)(p_swap_chains[i].id);
		RenderingContextDriverMetal::Surface *metal_surface = (RenderingContextDriverMetal::Surface *)(swap_chain->surface);
		metal_surface->present(cmd_buffer);
	}

	cmd_buffer->commit();

	if (p_swap_chains.size() > 0) {
		// Used as a signal that we're presenting, so this is the end of a frame.
		[device_scope endScope];
		[device_scope beginScope];
	}

	return OK;
}

void RenderingDeviceDriverMetal::command_queue_free(CommandQueueID p_cmd_queue) {
}

#pragma mark - Command Buffers

// ----- POOL -----

RDD::CommandPoolID RenderingDeviceDriverMetal::command_pool_create(CommandQueueFamilyID p_cmd_queue_family, CommandBufferType p_cmd_buffer_type) {
	DEV_ASSERT(p_cmd_buffer_type == COMMAND_BUFFER_TYPE_PRIMARY);
	return rid::make(device_queue);
}

bool RenderingDeviceDriverMetal::command_pool_reset(CommandPoolID p_cmd_pool) {
	return true;
}

void RenderingDeviceDriverMetal::command_pool_free(CommandPoolID p_cmd_pool) {
	rid::release(p_cmd_pool);
}

// ----- BUFFER -----

RDD::CommandBufferID RenderingDeviceDriverMetal::command_buffer_create(CommandPoolID p_cmd_pool) {
	id<MTLCommandQueue> queue = rid::get(p_cmd_pool);
	MDCommandBuffer *obj = new MDCommandBuffer(queue, this);
	command_buffers.push_back(obj);
	return CommandBufferID(obj);
}

bool RenderingDeviceDriverMetal::command_buffer_begin(CommandBufferID p_cmd_buffer) {
	MDCommandBuffer *obj = (MDCommandBuffer *)(p_cmd_buffer.id);
	obj->begin();
	return true;
}

bool RenderingDeviceDriverMetal::command_buffer_begin_secondary(CommandBufferID p_cmd_buffer, RenderPassID p_render_pass, uint32_t p_subpass, FramebufferID p_framebuffer) {
	ERR_FAIL_V_MSG(false, "not implemented");
}

void RenderingDeviceDriverMetal::command_buffer_end(CommandBufferID p_cmd_buffer) {
	MDCommandBuffer *obj = (MDCommandBuffer *)(p_cmd_buffer.id);
	obj->end();
}

void RenderingDeviceDriverMetal::command_buffer_execute_secondary(CommandBufferID p_cmd_buffer, VectorView<CommandBufferID> p_secondary_cmd_buffers) {
	ERR_FAIL_MSG("not implemented");
}
  806. #pragma mark - Swap Chain
  807. void RenderingDeviceDriverMetal::_swap_chain_release(SwapChain *p_swap_chain) {
  808. _swap_chain_release_buffers(p_swap_chain);
  809. }
  810. void RenderingDeviceDriverMetal::_swap_chain_release_buffers(SwapChain *p_swap_chain) {
  811. }
  812. RDD::SwapChainID RenderingDeviceDriverMetal::swap_chain_create(RenderingContextDriver::SurfaceID p_surface) {
  813. RenderingContextDriverMetal::Surface const *surface = (RenderingContextDriverMetal::Surface *)(p_surface);
  814. // Create the render pass that will be used to draw to the swap chain's framebuffers.
  815. RDD::Attachment attachment;
  816. attachment.format = pixel_formats->getDataFormat(surface->get_pixel_format());
  817. attachment.samples = RDD::TEXTURE_SAMPLES_1;
  818. attachment.load_op = RDD::ATTACHMENT_LOAD_OP_CLEAR;
  819. attachment.store_op = RDD::ATTACHMENT_STORE_OP_STORE;
  820. RDD::Subpass subpass;
  821. RDD::AttachmentReference color_ref;
  822. color_ref.attachment = 0;
  823. color_ref.aspect.set_flag(RDD::TEXTURE_ASPECT_COLOR_BIT);
  824. subpass.color_references.push_back(color_ref);
  825. RenderPassID render_pass = render_pass_create(attachment, subpass, {}, 1);
  826. ERR_FAIL_COND_V(!render_pass, SwapChainID());
  827. // Create the empty swap chain until it is resized.
  828. SwapChain *swap_chain = memnew(SwapChain);
  829. swap_chain->surface = p_surface;
  830. swap_chain->data_format = attachment.format;
  831. swap_chain->render_pass = render_pass;
  832. return SwapChainID(swap_chain);
  833. }
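// Note: the swap chain starts out without drawables. Until swap_chain_resize()
// below has run for the surface, swap_chain_acquire_framebuffer() reports
// r_resize_required = true and returns a null FramebufferID, so callers are
// expected to resize before the first acquire.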
  834. Error RenderingDeviceDriverMetal::swap_chain_resize(CommandQueueID p_cmd_queue, SwapChainID p_swap_chain, uint32_t p_desired_framebuffer_count) {
  835. DEV_ASSERT(p_cmd_queue.id != 0);
  836. DEV_ASSERT(p_swap_chain.id != 0);
  837. SwapChain *swap_chain = (SwapChain *)(p_swap_chain.id);
  838. RenderingContextDriverMetal::Surface *surface = (RenderingContextDriverMetal::Surface *)(swap_chain->surface);
  839. surface->resize(p_desired_framebuffer_count);
  840. // Once everything's been created correctly, indicate the surface no longer needs to be resized.
  841. context_driver->surface_set_needs_resize(swap_chain->surface, false);
  842. return OK;
  843. }
  844. RDD::FramebufferID RenderingDeviceDriverMetal::swap_chain_acquire_framebuffer(CommandQueueID p_cmd_queue, SwapChainID p_swap_chain, bool &r_resize_required) {
  845. DEV_ASSERT(p_cmd_queue.id != 0);
  846. DEV_ASSERT(p_swap_chain.id != 0);
  847. SwapChain *swap_chain = (SwapChain *)(p_swap_chain.id);
  848. if (context_driver->surface_get_needs_resize(swap_chain->surface)) {
  849. r_resize_required = true;
  850. return FramebufferID();
  851. }
  852. RenderingContextDriverMetal::Surface *metal_surface = (RenderingContextDriverMetal::Surface *)(swap_chain->surface);
  853. return metal_surface->acquire_next_frame_buffer();
  854. }
  855. RDD::RenderPassID RenderingDeviceDriverMetal::swap_chain_get_render_pass(SwapChainID p_swap_chain) {
  856. const SwapChain *swap_chain = (const SwapChain *)(p_swap_chain.id);
  857. return swap_chain->render_pass;
  858. }
  859. RDD::DataFormat RenderingDeviceDriverMetal::swap_chain_get_format(SwapChainID p_swap_chain) {
  860. const SwapChain *swap_chain = (const SwapChain *)(p_swap_chain.id);
  861. return swap_chain->data_format;
  862. }
  863. void RenderingDeviceDriverMetal::swap_chain_set_max_fps(SwapChainID p_swap_chain, int p_max_fps) {
  864. SwapChain *swap_chain = (SwapChain *)(p_swap_chain.id);
  865. RenderingContextDriverMetal::Surface *metal_surface = (RenderingContextDriverMetal::Surface *)(swap_chain->surface);
  866. metal_surface->set_max_fps(p_max_fps);
  867. }
  868. void RenderingDeviceDriverMetal::swap_chain_free(SwapChainID p_swap_chain) {
  869. SwapChain *swap_chain = (SwapChain *)(p_swap_chain.id);
  870. _swap_chain_release(swap_chain);
  871. render_pass_free(swap_chain->render_pass);
  872. memdelete(swap_chain);
  873. }
  874. #pragma mark - Frame buffer
  875. RDD::FramebufferID RenderingDeviceDriverMetal::framebuffer_create(RenderPassID p_render_pass, VectorView<TextureID> p_attachments, uint32_t p_width, uint32_t p_height) {
  876. MDRenderPass *pass = (MDRenderPass *)(p_render_pass.id);
  877. Vector<MTL::Texture> textures;
  878. textures.resize(p_attachments.size());
  879. for (uint32_t i = 0; i < p_attachments.size(); i += 1) {
  880. MDAttachment const &a = pass->attachments[i];
  881. id<MTLTexture> tex = rid::get(p_attachments[i]);
  882. if (tex == nil) {
  883. #if DEV_ENABLED
  884. WARN_PRINT("Invalid texture for attachment " + itos(i));
  885. #endif
  886. }
  887. if (a.samples > 1) {
  888. if (tex.sampleCount != a.samples) {
  889. #if DEV_ENABLED
  890. WARN_PRINT("Mismatched sample count for attachment " + itos(i) + "; expected " + itos(a.samples) + ", got " + itos(tex.sampleCount));
  891. #endif
  892. }
  893. }
  894. textures.write[i] = tex;
  895. }
  896. MDFrameBuffer *fb = new MDFrameBuffer(textures, Size2i(p_width, p_height));
  897. return FramebufferID(fb);
  898. }
  899. void RenderingDeviceDriverMetal::framebuffer_free(FramebufferID p_framebuffer) {
  900. MDFrameBuffer *obj = (MDFrameBuffer *)(p_framebuffer.id);
  901. delete obj;
  902. }
  903. #pragma mark - Shader
  904. const uint32_t SHADER_BINARY_VERSION = 4;
  905. // region Serialization
  906. class BufWriter;
  907. template <typename T>
  908. concept Serializable = requires(T t, BufWriter &p_writer) {
  909. {
  910. t.serialize_size()
  911. } -> std::same_as<size_t>;
  912. {
  913. t.serialize(p_writer)
  914. } -> std::same_as<void>;
  915. };
  916. class BufWriter {
  917. uint8_t *data = nullptr;
  918. uint64_t length = 0; // Length of data.
  919. uint64_t pos = 0;
  920. public:
  921. BufWriter(uint8_t *p_data, uint64_t p_length) :
  922. data(p_data), length(p_length) {}
  923. template <Serializable T>
  924. void write(T const &p_value) {
  925. p_value.serialize(*this);
  926. }
  927. _FORCE_INLINE_ void write(uint32_t p_value) {
  928. DEV_ASSERT(pos + sizeof(uint32_t) <= length);
  929. pos += encode_uint32(p_value, data + pos);
  930. }
  931. _FORCE_INLINE_ void write(RD::ShaderStage p_value) {
  932. write((uint32_t)p_value);
  933. }
  934. _FORCE_INLINE_ void write(bool p_value) {
  935. DEV_ASSERT(pos + sizeof(uint8_t) <= length);
  936. *(data + pos) = p_value ? 1 : 0;
  937. pos += 1;
  938. }
  939. _FORCE_INLINE_ void write(int p_value) {
  940. write((uint32_t)p_value);
  941. }
  942. _FORCE_INLINE_ void write(uint64_t p_value) {
  943. DEV_ASSERT(pos + sizeof(uint64_t) <= length);
  944. pos += encode_uint64(p_value, data + pos);
  945. }
  946. _FORCE_INLINE_ void write(float p_value) {
  947. DEV_ASSERT(pos + sizeof(float) <= length);
  948. pos += encode_float(p_value, data + pos);
  949. }
  950. _FORCE_INLINE_ void write(double p_value) {
  951. DEV_ASSERT(pos + sizeof(double) <= length);
  952. pos += encode_double(p_value, data + pos);
  953. }
  954. void write_compressed(CharString const &p_string) {
  955. write(p_string.length()); // Uncompressed size.
  956. DEV_ASSERT(pos + sizeof(uint32_t) + Compression::get_max_compressed_buffer_size(p_string.length(), Compression::MODE_ZSTD) <= length);
  957. // Save pointer for compressed size.
  958. uint8_t *dst_size_ptr = data + pos; // Compressed size.
  959. pos += sizeof(uint32_t);
  960. int dst_size = Compression::compress(data + pos, reinterpret_cast<uint8_t const *>(p_string.ptr()), p_string.length(), Compression::MODE_ZSTD);
  961. encode_uint32(dst_size, dst_size_ptr);
  962. pos += dst_size;
  963. }
  964. void write(CharString const &p_string) {
  965. write_buffer(reinterpret_cast<const uint8_t *>(p_string.ptr()), p_string.length());
  966. }
  967. template <typename T>
  968. void write(VectorView<T> p_vector) {
  969. write(p_vector.size());
  970. for (uint32_t i = 0; i < p_vector.size(); i++) {
  971. T const &e = p_vector[i];
  972. write(e);
  973. }
  974. }
  975. void write(VectorView<uint8_t> p_vector) {
  976. write_buffer(p_vector.ptr(), p_vector.size());
  977. }
  978. template <typename K, typename V>
  979. void write(HashMap<K, V> const &p_map) {
  980. write(p_map.size());
  981. for (KeyValue<K, V> const &e : p_map) {
  982. write(e.key);
  983. write(e.value);
  984. }
  985. }
  986. uint64_t get_pos() const {
  987. return pos;
  988. }
  989. uint64_t get_length() const {
  990. return length;
  991. }
  992. private:
  993. void write_buffer(uint8_t const *p_buffer, uint32_t p_length) {
  994. write(p_length);
  995. DEV_ASSERT(pos + p_length <= length);
  996. memcpy(data + pos, p_buffer, p_length);
  997. pos += p_length;
  998. }
  999. };
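// Minimal usage sketch for BufWriter (illustrative only; the real producer is
// shader_compile_binary_from_spirv() further down, and ComputeSize is declared
// below):
//
//   ComputeSize cs = { 8, 8, 1 };
//   Vector<uint8_t> buf;
//   buf.resize(cs.serialize_size());
//   BufWriter w(buf.ptrw(), buf.size());
//   w.write(cs);             // Dispatches to ComputeSize::serialize().
//   buf.resize(w.get_pos()); // Trim to the bytes actually written.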
  1000. class BufReader;
  1001. template <typename T>
  1002. concept Deserializable = requires(T t, BufReader &p_reader) {
  1003. {
  1004. t.serialize_size()
  1005. } -> std::same_as<size_t>;
  1006. {
  1007. t.deserialize(p_reader)
  1008. } -> std::same_as<void>;
  1009. };
  1010. class BufReader {
  1011. uint8_t const *data = nullptr;
  1012. uint64_t length = 0;
  1013. uint64_t pos = 0;
  1014. bool check_length(size_t p_size) {
  1015. if (status != Status::OK) {
  1016. return false;
  1017. }
  1018. if (pos + p_size > length) {
  1019. status = Status::SHORT_BUFFER;
  1020. return false;
  1021. }
  1022. return true;
  1023. }
  1024. #define CHECK(p_size) \
  1025. if (!check_length(p_size)) \
  1026. return
  1027. public:
  1028. enum class Status {
  1029. OK,
  1030. SHORT_BUFFER,
  1031. BAD_COMPRESSION,
  1032. };
  1033. Status status = Status::OK;
  1034. BufReader(uint8_t const *p_data, uint64_t p_length) :
  1035. data(p_data), length(p_length) {}
  1036. template <Deserializable T>
  1037. void read(T &p_value) {
  1038. p_value.deserialize(*this);
  1039. }
  1040. _FORCE_INLINE_ void read(uint32_t &p_val) {
  1041. CHECK(sizeof(uint32_t));
  1042. p_val = decode_uint32(data + pos);
  1043. pos += sizeof(uint32_t);
  1044. }
  1045. _FORCE_INLINE_ void read(RD::ShaderStage &p_val) {
  1046. uint32_t val;
  1047. read(val);
  1048. p_val = (RD::ShaderStage)val;
  1049. }
  1050. _FORCE_INLINE_ void read(bool &p_val) {
  1051. CHECK(sizeof(uint8_t));
  1052. p_val = *(data + pos) > 0;
  1053. pos += 1;
  1054. }
  1055. _FORCE_INLINE_ void read(uint64_t &p_val) {
  1056. CHECK(sizeof(uint64_t));
  1057. p_val = decode_uint64(data + pos);
  1058. pos += sizeof(uint64_t);
  1059. }
  1060. _FORCE_INLINE_ void read(float &p_val) {
  1061. CHECK(sizeof(float));
  1062. p_val = decode_float(data + pos);
  1063. pos += sizeof(float);
  1064. }
  1065. _FORCE_INLINE_ void read(double &p_val) {
  1066. CHECK(sizeof(double));
  1067. p_val = decode_double(data + pos);
  1068. pos += sizeof(double);
  1069. }
  1070. void read(CharString &p_val) {
  1071. uint32_t len;
  1072. read(len);
  1073. CHECK(len);
  1074. p_val.resize(len + 1 /* NUL */);
  1075. memcpy(p_val.ptrw(), data + pos, len);
  1076. p_val.set(len, 0);
  1077. pos += len;
  1078. }
  1079. void read_compressed(CharString &p_val) {
  1080. uint32_t len;
  1081. read(len);
  1082. uint32_t comp_size;
  1083. read(comp_size);
  1084. CHECK(comp_size);
  1085. p_val.resize(len + 1 /* NUL */);
  1086. uint32_t bytes = (uint32_t)Compression::decompress(reinterpret_cast<uint8_t *>(p_val.ptrw()), len, data + pos, comp_size, Compression::MODE_ZSTD);
  1087. if (bytes != len) {
  1088. status = Status::BAD_COMPRESSION;
  1089. return;
  1090. }
  1091. p_val.set(len, 0);
  1092. pos += comp_size;
  1093. }
  1094. void read(LocalVector<uint8_t> &p_val) {
  1095. uint32_t len;
  1096. read(len);
  1097. CHECK(len);
  1098. p_val.resize(len);
  1099. memcpy(p_val.ptr(), data + pos, len);
  1100. pos += len;
  1101. }
  1102. template <typename T>
  1103. void read(LocalVector<T> &p_val) {
  1104. uint32_t len;
  1105. read(len);
  1106. CHECK(len);
  1107. p_val.resize(len);
  1108. for (uint32_t i = 0; i < len; i++) {
  1109. read(p_val[i]);
  1110. }
  1111. }
  1112. template <typename K, typename V>
  1113. void read(HashMap<K, V> &p_map) {
  1114. uint32_t len;
  1115. read(len);
  1116. CHECK(len);
  1117. p_map.reserve(len);
  1118. for (uint32_t i = 0; i < len; i++) {
  1119. K key;
  1120. read(key);
  1121. V value;
  1122. read(value);
  1123. p_map[key] = value;
  1124. }
  1125. }
  1126. #undef CHECK
  1127. };
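// Matching read-back sketch (illustrative). BufReader never throws; callers
// check `status` after deserializing, as shader_create_from_bytecode() does
// further down:
//
//   BufReader r(buf.ptr(), buf.size());
//   ComputeSize cs2;
//   r.read(cs2); // Dispatches to ComputeSize::deserialize().
//   if (r.status != BufReader::Status::OK) {
//       // SHORT_BUFFER or BAD_COMPRESSION: treat the blob as corrupt.
//   }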
  1128. const uint32_t R32UI_ALIGNMENT_CONSTANT_ID = 65535;
  1129. struct ComputeSize {
  1130. uint32_t x = 0;
  1131. uint32_t y = 0;
  1132. uint32_t z = 0;
  1133. size_t serialize_size() const {
  1134. return sizeof(uint32_t) * 3;
  1135. }
  1136. void serialize(BufWriter &p_writer) const {
  1137. p_writer.write(x);
  1138. p_writer.write(y);
  1139. p_writer.write(z);
  1140. }
  1141. void deserialize(BufReader &p_reader) {
  1142. p_reader.read(x);
  1143. p_reader.read(y);
  1144. p_reader.read(z);
  1145. }
  1146. };
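// ComputeSize mirrors the SPIR-V LocalSize execution mode: it is filled from
// compiler.get_execution_mode_argument(spv::ExecutionModeLocalSize, n) during
// reflection and stored in ShaderBinaryData::compute_local_size.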
  1147. struct ShaderStageData {
  1148. RD::ShaderStage stage = RD::ShaderStage::SHADER_STAGE_MAX;
  1149. uint32_t is_position_invariant = UINT32_MAX;
  1150. uint32_t supports_fast_math = UINT32_MAX;
  1151. CharString entry_point_name;
  1152. CharString source;
  1153. size_t serialize_size() const {
  1154. int comp_size = Compression::get_max_compressed_buffer_size(source.length(), Compression::MODE_ZSTD);
  1155. return sizeof(uint32_t) // Stage.
  1156. + sizeof(uint32_t) // is_position_invariant
  1157. + sizeof(uint32_t) // supports_fast_math
  1158. + sizeof(uint32_t) /* entry_point_name.utf8().length */
  1159. + entry_point_name.length() + sizeof(uint32_t) /* uncompressed size */ + sizeof(uint32_t) /* compressed size */ + comp_size;
  1160. }
  1161. void serialize(BufWriter &p_writer) const {
  1162. p_writer.write((uint32_t)stage);
  1163. p_writer.write(is_position_invariant);
  1164. p_writer.write(supports_fast_math);
  1165. p_writer.write(entry_point_name);
  1166. p_writer.write_compressed(source);
  1167. }
  1168. void deserialize(BufReader &p_reader) {
  1169. p_reader.read((uint32_t &)stage);
  1170. p_reader.read(is_position_invariant);
  1171. p_reader.read(supports_fast_math);
  1172. p_reader.read(entry_point_name);
  1173. p_reader.read_compressed(source);
  1174. }
  1175. };
  1176. struct SpecializationConstantData {
  1177. uint32_t constant_id = UINT32_MAX;
  1178. RD::PipelineSpecializationConstantType type = RD::PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
  1179. ShaderStageUsage stages = ShaderStageUsage::None;
// Specifies the stages in which the constant is used by the Metal shader.
  1181. ShaderStageUsage used_stages = ShaderStageUsage::None;
  1182. uint32_t int_value = UINT32_MAX;
  1183. size_t serialize_size() const {
  1184. return sizeof(constant_id) + sizeof(uint32_t) // type
  1185. + sizeof(stages) + sizeof(used_stages) // used_stages
  1186. + sizeof(int_value); // int_value
  1187. }
  1188. void serialize(BufWriter &p_writer) const {
  1189. p_writer.write(constant_id);
  1190. p_writer.write((uint32_t)type);
  1191. p_writer.write(stages);
  1192. p_writer.write(used_stages);
  1193. p_writer.write(int_value);
  1194. }
  1195. void deserialize(BufReader &p_reader) {
  1196. p_reader.read(constant_id);
  1197. p_reader.read((uint32_t &)type);
  1198. p_reader.read((uint32_t &)stages);
  1199. p_reader.read((uint32_t &)used_stages);
  1200. p_reader.read(int_value);
  1201. }
  1202. };
  1203. struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) UniformData {
  1204. RD::UniformType type = RD::UniformType::UNIFORM_TYPE_MAX;
  1205. uint32_t binding = UINT32_MAX;
  1206. bool writable = false;
  1207. uint32_t length = UINT32_MAX;
  1208. ShaderStageUsage stages = ShaderStageUsage::None;
// Specifies the stages in which the uniform data is used by the Metal shader.
  1211. ShaderStageUsage active_stages = ShaderStageUsage::None;
  1212. BindingInfoMap bindings;
  1213. BindingInfoMap bindings_secondary;
  1214. size_t serialize_size() const {
  1215. size_t size = 0;
  1216. size += sizeof(uint32_t); // type
  1217. size += sizeof(uint32_t); // binding
  1218. size += sizeof(uint32_t); // writable
  1219. size += sizeof(uint32_t); // length
  1220. size += sizeof(uint32_t); // stages
  1221. size += sizeof(uint32_t); // active_stages
  1222. size += sizeof(uint32_t); // bindings.size()
  1223. size += sizeof(uint32_t) * bindings.size(); // Total size of keys.
  1224. for (KeyValue<RD::ShaderStage, BindingInfo> const &e : bindings) {
  1225. size += e.value.serialize_size();
  1226. }
  1227. size += sizeof(uint32_t); // bindings_secondary.size()
  1228. size += sizeof(uint32_t) * bindings_secondary.size(); // Total size of keys.
  1229. for (KeyValue<RD::ShaderStage, BindingInfo> const &e : bindings_secondary) {
  1230. size += e.value.serialize_size();
  1231. }
  1232. return size;
  1233. }
  1234. void serialize(BufWriter &p_writer) const {
  1235. p_writer.write((uint32_t)type);
  1236. p_writer.write(binding);
  1237. p_writer.write(writable);
  1238. p_writer.write(length);
  1239. p_writer.write(stages);
  1240. p_writer.write(active_stages);
  1241. p_writer.write(bindings);
  1242. p_writer.write(bindings_secondary);
  1243. }
  1244. void deserialize(BufReader &p_reader) {
  1245. p_reader.read((uint32_t &)type);
  1246. p_reader.read(binding);
  1247. p_reader.read(writable);
  1248. p_reader.read(length);
  1249. p_reader.read((uint32_t &)stages);
  1250. p_reader.read((uint32_t &)active_stages);
  1251. p_reader.read(bindings);
  1252. p_reader.read(bindings_secondary);
  1253. }
  1254. };
  1255. struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) UniformSetData {
  1256. uint32_t index = UINT32_MAX;
  1257. LocalVector<UniformData> uniforms;
  1258. size_t serialize_size() const {
  1259. size_t size = 0;
  1260. size += sizeof(uint32_t); // index
  1261. size += sizeof(uint32_t); // uniforms.size()
  1262. for (UniformData const &e : uniforms) {
  1263. size += e.serialize_size();
  1264. }
  1265. return size;
  1266. }
  1267. void serialize(BufWriter &p_writer) const {
  1268. p_writer.write(index);
  1269. p_writer.write(VectorView(uniforms));
  1270. }
  1271. void deserialize(BufReader &p_reader) {
  1272. p_reader.read(index);
  1273. p_reader.read(uniforms);
  1274. }
  1275. UniformSetData() = default;
  1276. UniformSetData(uint32_t p_index) :
  1277. index(p_index) {}
  1278. };
  1279. struct PushConstantData {
  1280. uint32_t size = UINT32_MAX;
  1281. ShaderStageUsage stages = ShaderStageUsage::None;
  1282. ShaderStageUsage used_stages = ShaderStageUsage::None;
  1283. HashMap<RD::ShaderStage, uint32_t> msl_binding;
  1284. size_t serialize_size() const {
  1285. return sizeof(uint32_t) // size
  1286. + sizeof(uint32_t) // stages
  1287. + sizeof(uint32_t) // used_stages
  1288. + sizeof(uint32_t) // msl_binding.size()
  1289. + sizeof(uint32_t) * msl_binding.size() // keys
  1290. + sizeof(uint32_t) * msl_binding.size(); // values
  1291. }
  1292. void serialize(BufWriter &p_writer) const {
  1293. p_writer.write(size);
  1294. p_writer.write((uint32_t)stages);
  1295. p_writer.write((uint32_t)used_stages);
  1296. p_writer.write(msl_binding);
  1297. }
  1298. void deserialize(BufReader &p_reader) {
  1299. p_reader.read(size);
  1300. p_reader.read((uint32_t &)stages);
  1301. p_reader.read((uint32_t &)used_stages);
  1302. p_reader.read(msl_binding);
  1303. }
  1304. };
  1305. struct API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0)) ShaderBinaryData {
  1306. enum Flags : uint32_t {
  1307. NONE = 0,
  1308. NEEDS_VIEW_MASK_BUFFER = 1 << 0,
  1309. USES_ARGUMENT_BUFFERS = 1 << 1,
  1310. };
  1311. CharString shader_name;
  1312. // The Metal language version specified when compiling SPIR-V to MSL.
  1313. // Format is major * 10000 + minor * 100 + patch.
  1314. uint32_t msl_version = UINT32_MAX;
  1315. uint32_t vertex_input_mask = UINT32_MAX;
  1316. uint32_t fragment_output_mask = UINT32_MAX;
  1317. uint32_t spirv_specialization_constants_ids_mask = UINT32_MAX;
  1318. uint32_t flags = NONE;
  1319. ComputeSize compute_local_size;
  1320. PushConstantData push_constant;
  1321. LocalVector<ShaderStageData> stages;
  1322. LocalVector<SpecializationConstantData> constants;
  1323. LocalVector<UniformSetData> uniforms;
  1324. MTLLanguageVersion get_msl_version() const {
  1325. uint32_t major = msl_version / 10000;
  1326. uint32_t minor = (msl_version / 100) % 100;
  1327. return MTLLanguageVersion((major << 0x10) + minor);
  1328. }
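// Example: msl_version 30100 (MSL 3.1, per the format comment above) yields
// MTLLanguageVersion((3 << 0x10) + 1), i.e. MTLLanguageVersion3_1.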
  1329. bool is_compute() const {
  1330. return std::any_of(stages.begin(), stages.end(), [](ShaderStageData const &e) {
  1331. return e.stage == RD::ShaderStage::SHADER_STAGE_COMPUTE;
  1332. });
  1333. }
  1334. bool needs_view_mask_buffer() const {
  1335. return flags & NEEDS_VIEW_MASK_BUFFER;
  1336. }
  1337. void set_needs_view_mask_buffer(bool p_value) {
  1338. if (p_value) {
  1339. flags |= NEEDS_VIEW_MASK_BUFFER;
  1340. } else {
  1341. flags &= ~NEEDS_VIEW_MASK_BUFFER;
  1342. }
  1343. }
  1344. bool uses_argument_buffers() const {
  1345. return flags & USES_ARGUMENT_BUFFERS;
  1346. }
  1347. void set_uses_argument_buffers(bool p_value) {
  1348. if (p_value) {
  1349. flags |= USES_ARGUMENT_BUFFERS;
  1350. } else {
  1351. flags &= ~USES_ARGUMENT_BUFFERS;
  1352. }
  1353. }
  1354. size_t serialize_size() const {
  1355. size_t size = 0;
  1356. size += sizeof(uint32_t) + shader_name.length(); // shader_name
  1357. size += sizeof(msl_version);
  1358. size += sizeof(vertex_input_mask);
  1359. size += sizeof(fragment_output_mask);
  1360. size += sizeof(spirv_specialization_constants_ids_mask);
  1361. size += sizeof(flags);
  1362. size += compute_local_size.serialize_size();
  1363. size += push_constant.serialize_size();
  1364. size += sizeof(uint32_t); // stages.size()
  1365. for (ShaderStageData const &e : stages) {
  1366. size += e.serialize_size();
  1367. }
  1368. size += sizeof(uint32_t); // constants.size()
  1369. for (SpecializationConstantData const &e : constants) {
  1370. size += e.serialize_size();
  1371. }
  1372. size += sizeof(uint32_t); // uniforms.size()
  1373. for (UniformSetData const &e : uniforms) {
  1374. size += e.serialize_size();
  1375. }
  1376. return size;
  1377. }
  1378. void serialize(BufWriter &p_writer) const {
  1379. p_writer.write(shader_name);
  1380. p_writer.write(msl_version);
  1381. p_writer.write(vertex_input_mask);
  1382. p_writer.write(fragment_output_mask);
  1383. p_writer.write(spirv_specialization_constants_ids_mask);
  1384. p_writer.write(flags);
  1385. p_writer.write(compute_local_size);
  1386. p_writer.write(push_constant);
  1387. p_writer.write(VectorView(stages));
  1388. p_writer.write(VectorView(constants));
  1389. p_writer.write(VectorView(uniforms));
  1390. }
  1391. void deserialize(BufReader &p_reader) {
  1392. p_reader.read(shader_name);
  1393. p_reader.read(msl_version);
  1394. p_reader.read(vertex_input_mask);
  1395. p_reader.read(fragment_output_mask);
  1396. p_reader.read(spirv_specialization_constants_ids_mask);
  1397. p_reader.read(flags);
  1398. p_reader.read(compute_local_size);
  1399. p_reader.read(push_constant);
  1400. p_reader.read(stages);
  1401. p_reader.read(constants);
  1402. p_reader.read(uniforms);
  1403. }
  1404. };
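// On-disk layout of the shader binary (produced by
// shader_compile_binary_from_spirv() and parsed by shader_create_from_bytecode()
// below):
//
//   bytes 0-3 : magic 'G' 'M' 'S' 'L'
//   bytes 4-7 : SHADER_BINARY_VERSION (currently 4)
//   bytes 8.. : ShaderBinaryData::serialize() payload; per-stage MSL source is
//               ZSTD-compressed via BufWriter::write_compressed().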
  1405. // endregion
  1406. String RenderingDeviceDriverMetal::shader_get_binary_cache_key() {
  1407. static const String cache_key = "Metal-SV" + uitos(SHADER_BINARY_VERSION);
  1408. return cache_key;
  1409. }
  1410. Error RenderingDeviceDriverMetal::_reflect_spirv16(VectorView<ShaderStageSPIRVData> p_spirv, ShaderReflection &r_reflection, ShaderMeta &r_shader_meta) {
  1411. using namespace spirv_cross;
  1412. using spirv_cross::Resource;
  1413. r_reflection = {};
  1414. r_shader_meta = {};
  1415. for (uint32_t i = 0; i < p_spirv.size(); i++) {
  1416. ShaderStageSPIRVData const &v = p_spirv[i];
  1417. ShaderStage stage = v.shader_stage;
  1418. uint32_t const *const ir = reinterpret_cast<uint32_t const *const>(v.spirv.ptr());
  1419. size_t word_count = v.spirv.size() / sizeof(uint32_t);
  1420. Parser parser(ir, word_count);
  1421. try {
  1422. parser.parse();
  1423. } catch (CompilerError &e) {
  1424. ERR_FAIL_V_MSG(ERR_CANT_CREATE, "Failed to parse IR at stage " + String(SHADER_STAGE_NAMES[stage]) + ": " + e.what());
  1425. }
  1426. ShaderStage stage_flag = (ShaderStage)(1 << p_spirv[i].shader_stage);
  1427. if (p_spirv[i].shader_stage == SHADER_STAGE_COMPUTE) {
  1428. r_reflection.is_compute = true;
  1429. ERR_FAIL_COND_V_MSG(p_spirv.size() != 1, FAILED,
  1430. "Compute shaders can only receive one stage, dedicated to compute.");
  1431. }
  1432. ERR_FAIL_COND_V_MSG(r_reflection.stages.has_flag(stage_flag), FAILED,
  1433. "Stage " + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + " submitted more than once.");
  1434. ParsedIR &pir = parser.get_parsed_ir();
  1435. using BT = SPIRType::BaseType;
  1436. Compiler compiler(std::move(pir));
  1437. if (r_reflection.is_compute) {
  1438. r_reflection.compute_local_size[0] = compiler.get_execution_mode_argument(spv::ExecutionModeLocalSize, 0);
  1439. r_reflection.compute_local_size[1] = compiler.get_execution_mode_argument(spv::ExecutionModeLocalSize, 1);
  1440. r_reflection.compute_local_size[2] = compiler.get_execution_mode_argument(spv::ExecutionModeLocalSize, 2);
  1441. }
  1442. // Parse bindings.
  1443. auto get_decoration = [&compiler](spirv_cross::ID id, spv::Decoration decoration) {
  1444. uint32_t res = -1;
  1445. if (compiler.has_decoration(id, decoration)) {
  1446. res = compiler.get_decoration(id, decoration);
  1447. }
  1448. return res;
  1449. };
  1450. // Always clearer than a boolean.
  1451. enum class Writable {
  1452. No,
  1453. Maybe,
  1454. };
  1455. // clang-format off
  1456. enum {
  1457. SPIRV_WORD_SIZE = sizeof(uint32_t),
  1458. SPIRV_DATA_ALIGNMENT = 4 * SPIRV_WORD_SIZE,
  1459. };
  1460. // clang-format on
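// SPIRV_DATA_ALIGNMENT is 16 bytes (4 SPIR-V words); e.g. a uniform block whose
// declared struct size is 20 bytes is reported below with
// length == round_up_to_alignment(20, 16) == 32.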
  1461. auto process_uniforms = [&r_reflection, &compiler, &get_decoration, stage, stage_flag](SmallVector<Resource> &resources, Writable writable, std::function<RDD::UniformType(SPIRType const &)> uniform_type) {
  1462. for (Resource const &res : resources) {
  1463. ShaderUniform uniform;
  1464. std::string const &name = compiler.get_name(res.id);
  1465. uint32_t set = get_decoration(res.id, spv::DecorationDescriptorSet);
  1466. ERR_FAIL_COND_V_MSG(set == (uint32_t)-1, FAILED, "No descriptor set found");
  1467. ERR_FAIL_COND_V_MSG(set >= MAX_UNIFORM_SETS, FAILED, "On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + name.c_str() + "' uses a set (" + itos(set) + ") index larger than what is supported (" + itos(MAX_UNIFORM_SETS) + ").");
  1468. uniform.binding = get_decoration(res.id, spv::DecorationBinding);
  1469. ERR_FAIL_COND_V_MSG(uniform.binding == (uint32_t)-1, FAILED, "No binding found");
  1470. SPIRType const &a_type = compiler.get_type(res.type_id);
  1471. uniform.type = uniform_type(a_type);
  1472. // Update length.
  1473. switch (a_type.basetype) {
  1474. case BT::Struct: {
  1475. if (uniform.type == UNIFORM_TYPE_STORAGE_BUFFER) {
  1476. // Consistent with spirv_reflect.
  1477. uniform.length = 0;
  1478. } else {
  1479. uniform.length = round_up_to_alignment(compiler.get_declared_struct_size(a_type), SPIRV_DATA_ALIGNMENT);
  1480. }
  1481. } break;
  1482. case BT::Image:
  1483. case BT::Sampler:
  1484. case BT::SampledImage: {
  1485. uniform.length = 1;
  1486. for (uint32_t const &a : a_type.array) {
  1487. uniform.length *= a;
  1488. }
  1489. } break;
  1490. default:
  1491. break;
  1492. }
  1493. // Update writable.
  1494. if (writable == Writable::Maybe) {
  1495. if (a_type.basetype == BT::Struct) {
  1496. Bitset flags = compiler.get_buffer_block_flags(res.id);
  1497. uniform.writable = !compiler.has_decoration(res.id, spv::DecorationNonWritable) && !flags.get(spv::DecorationNonWritable);
  1498. } else if (a_type.basetype == BT::Image) {
  1499. if (a_type.image.access == spv::AccessQualifierMax) {
  1500. uniform.writable = !compiler.has_decoration(res.id, spv::DecorationNonWritable);
  1501. } else {
  1502. uniform.writable = a_type.image.access != spv::AccessQualifierReadOnly;
  1503. }
  1504. }
  1505. }
  1506. if (set < (uint32_t)r_reflection.uniform_sets.size()) {
  1507. // Check if this already exists.
  1508. bool exists = false;
  1509. for (uint32_t k = 0; k < r_reflection.uniform_sets[set].size(); k++) {
  1510. if (r_reflection.uniform_sets[set][k].binding == uniform.binding) {
  1511. // Already exists, verify that it's the same type.
  1512. ERR_FAIL_COND_V_MSG(r_reflection.uniform_sets[set][k].type != uniform.type, FAILED,
  1513. "On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + name.c_str() + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform type.");
  1514. // Also, verify that it's the same size.
  1515. ERR_FAIL_COND_V_MSG(r_reflection.uniform_sets[set][k].length != uniform.length, FAILED,
  1516. "On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + name.c_str() + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different uniform size.");
  1517. // Also, verify that it has the same writability.
  1518. ERR_FAIL_COND_V_MSG(r_reflection.uniform_sets[set][k].writable != uniform.writable, FAILED,
  1519. "On shader stage '" + String(SHADER_STAGE_NAMES[stage]) + "', uniform '" + name.c_str() + "' trying to reuse location for set=" + itos(set) + ", binding=" + itos(uniform.binding) + " with different writability.");
  1520. // Just append stage mask and continue.
  1521. r_reflection.uniform_sets.write[set].write[k].stages.set_flag(stage_flag);
  1522. exists = true;
  1523. break;
  1524. }
  1525. }
  1526. if (exists) {
  1527. continue; // Merged.
  1528. }
  1529. }
  1530. uniform.stages.set_flag(stage_flag);
  1531. if (set >= (uint32_t)r_reflection.uniform_sets.size()) {
  1532. r_reflection.uniform_sets.resize(set + 1);
  1533. }
  1534. r_reflection.uniform_sets.write[set].push_back(uniform);
  1535. }
  1536. return OK;
  1537. };
  1538. ShaderResources resources = compiler.get_shader_resources();
  1539. process_uniforms(resources.uniform_buffers, Writable::No, [](SPIRType const &a_type) {
  1540. DEV_ASSERT(a_type.basetype == BT::Struct);
  1541. return UNIFORM_TYPE_UNIFORM_BUFFER;
  1542. });
  1543. process_uniforms(resources.storage_buffers, Writable::Maybe, [](SPIRType const &a_type) {
  1544. DEV_ASSERT(a_type.basetype == BT::Struct);
  1545. return UNIFORM_TYPE_STORAGE_BUFFER;
  1546. });
  1547. process_uniforms(resources.storage_images, Writable::Maybe, [](SPIRType const &a_type) {
  1548. DEV_ASSERT(a_type.basetype == BT::Image);
  1549. if (a_type.image.dim == spv::DimBuffer) {
  1550. return UNIFORM_TYPE_IMAGE_BUFFER;
  1551. } else {
  1552. return UNIFORM_TYPE_IMAGE;
  1553. }
  1554. });
  1555. process_uniforms(resources.sampled_images, Writable::No, [](SPIRType const &a_type) {
  1556. DEV_ASSERT(a_type.basetype == BT::SampledImage);
  1557. return UNIFORM_TYPE_SAMPLER_WITH_TEXTURE;
  1558. });
  1559. process_uniforms(resources.separate_images, Writable::No, [](SPIRType const &a_type) {
  1560. DEV_ASSERT(a_type.basetype == BT::Image);
  1561. if (a_type.image.dim == spv::DimBuffer) {
  1562. return UNIFORM_TYPE_TEXTURE_BUFFER;
  1563. } else {
  1564. return UNIFORM_TYPE_TEXTURE;
  1565. }
  1566. });
  1567. process_uniforms(resources.separate_samplers, Writable::No, [](SPIRType const &a_type) {
  1568. DEV_ASSERT(a_type.basetype == BT::Sampler);
  1569. return UNIFORM_TYPE_SAMPLER;
  1570. });
  1571. process_uniforms(resources.subpass_inputs, Writable::No, [](SPIRType const &a_type) {
  1572. DEV_ASSERT(a_type.basetype == BT::Image && a_type.image.dim == spv::DimSubpassData);
  1573. return UNIFORM_TYPE_INPUT_ATTACHMENT;
  1574. });
  1575. if (!resources.push_constant_buffers.empty()) {
  1576. // There can be only one push constant block.
  1577. Resource const &res = resources.push_constant_buffers.front();
  1578. size_t push_constant_size = round_up_to_alignment(compiler.get_declared_struct_size(compiler.get_type(res.base_type_id)), SPIRV_DATA_ALIGNMENT);
  1579. ERR_FAIL_COND_V_MSG(r_reflection.push_constant_size && r_reflection.push_constant_size != push_constant_size, FAILED,
  1580. "Reflection of SPIR-V shader stage '" + String(SHADER_STAGE_NAMES[p_spirv[i].shader_stage]) + "': Push constant block must be the same across shader stages.");
  1581. r_reflection.push_constant_size = push_constant_size;
  1582. r_reflection.push_constant_stages.set_flag(stage_flag);
  1583. }
  1584. ERR_FAIL_COND_V_MSG(!resources.atomic_counters.empty(), FAILED, "Atomic counters not supported");
  1585. ERR_FAIL_COND_V_MSG(!resources.acceleration_structures.empty(), FAILED, "Acceleration structures not supported");
  1586. ERR_FAIL_COND_V_MSG(!resources.shader_record_buffers.empty(), FAILED, "Shader record buffers not supported");
  1587. if (stage == SHADER_STAGE_VERTEX && !resources.stage_inputs.empty()) {
  1588. for (Resource const &res : resources.stage_inputs) {
  1589. SPIRType a_type = compiler.get_type(res.base_type_id);
  1590. uint32_t loc = get_decoration(res.id, spv::DecorationLocation);
  1591. if (loc != (uint32_t)-1) {
  1592. r_reflection.vertex_input_mask |= 1 << loc;
  1593. }
  1594. }
  1595. }
  1596. if (stage == SHADER_STAGE_FRAGMENT && !resources.stage_outputs.empty()) {
  1597. for (Resource const &res : resources.stage_outputs) {
  1598. SPIRType a_type = compiler.get_type(res.base_type_id);
  1599. uint32_t loc = get_decoration(res.id, spv::DecorationLocation);
  1600. uint32_t built_in = spv::BuiltIn(get_decoration(res.id, spv::DecorationBuiltIn));
  1601. if (loc != (uint32_t)-1 && built_in != spv::BuiltInFragDepth) {
  1602. r_reflection.fragment_output_mask |= 1 << loc;
  1603. }
  1604. }
  1605. }
  1606. for (const BuiltInResource &res : resources.builtin_inputs) {
  1607. if (res.builtin == spv::BuiltInViewIndex || res.builtin == spv::BuiltInViewportIndex) {
  1608. r_shader_meta.has_multiview = true;
  1609. }
  1610. }
  1611. if (!r_shader_meta.has_multiview) {
  1612. for (const BuiltInResource &res : resources.builtin_outputs) {
  1613. if (res.builtin == spv::BuiltInViewIndex || res.builtin == spv::BuiltInViewportIndex) {
  1614. r_shader_meta.has_multiview = true;
  1615. }
  1616. }
  1617. }
  1618. // Specialization constants.
  1619. for (SpecializationConstant const &constant : compiler.get_specialization_constants()) {
  1620. int32_t existing = -1;
  1621. ShaderSpecializationConstant sconst;
  1622. SPIRConstant &spc = compiler.get_constant(constant.id);
  1623. SPIRType const &spct = compiler.get_type(spc.constant_type);
  1624. sconst.constant_id = constant.constant_id;
  1625. sconst.int_value = 0;
  1626. switch (spct.basetype) {
  1627. case BT::Boolean: {
  1628. sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_BOOL;
  1629. sconst.bool_value = spc.scalar() != 0;
  1630. } break;
  1631. case BT::Int:
  1632. case BT::UInt: {
  1633. sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_INT;
  1634. sconst.int_value = spc.scalar();
  1635. } break;
  1636. case BT::Float: {
  1637. sconst.type = PIPELINE_SPECIALIZATION_CONSTANT_TYPE_FLOAT;
  1638. sconst.float_value = spc.scalar_f32();
  1639. } break;
  1640. default:
  1641. ERR_FAIL_V_MSG(FAILED, "Unsupported specialization constant type");
  1642. }
  1643. sconst.stages.set_flag(stage_flag);
  1644. for (uint32_t k = 0; k < r_reflection.specialization_constants.size(); k++) {
  1645. if (r_reflection.specialization_constants[k].constant_id == sconst.constant_id) {
  1646. ERR_FAIL_COND_V_MSG(r_reflection.specialization_constants[k].type != sconst.type, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their types differ.");
  1647. ERR_FAIL_COND_V_MSG(r_reflection.specialization_constants[k].int_value != sconst.int_value, FAILED, "More than one specialization constant used for id (" + itos(sconst.constant_id) + "), but their default values differ.");
  1648. existing = k;
  1649. break;
  1650. }
  1651. }
if (existing >= 0) { // A constant with this id was already recorded; just add this stage's flag.
  1653. r_reflection.specialization_constants.write[existing].stages.set_flag(stage_flag);
  1654. } else {
  1655. r_reflection.specialization_constants.push_back(sconst);
  1656. }
  1657. }
  1658. r_reflection.stages.set_flag(stage_flag);
  1659. }
  1660. // Sort all uniform_sets.
  1661. for (uint32_t i = 0; i < r_reflection.uniform_sets.size(); i++) {
  1662. r_reflection.uniform_sets.write[i].sort();
  1663. }
  1664. return OK;
  1665. }
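// The reflection pass above is backend-agnostic: it only fills ShaderReflection
// (uniform sets, push constant size/stages, vertex input and fragment output
// masks, specialization constants) plus ShaderMeta::has_multiview. The
// Metal-specific data (BindingInfo, MSL resource indices) is produced by the
// MSL compilation pass in shader_compile_binary_from_spirv() below.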
  1666. Vector<uint8_t> RenderingDeviceDriverMetal::shader_compile_binary_from_spirv(VectorView<ShaderStageSPIRVData> p_spirv, const String &p_shader_name) {
  1667. using Result = ::Vector<uint8_t>;
  1668. using namespace spirv_cross;
  1669. using spirv_cross::CompilerMSL;
  1670. using spirv_cross::Resource;
  1671. ShaderReflection spirv_data;
  1672. ShaderMeta shader_meta;
  1673. ERR_FAIL_COND_V(_reflect_spirv16(p_spirv, spirv_data, shader_meta), Result());
  1674. ShaderBinaryData bin_data{};
  1675. if (!p_shader_name.is_empty()) {
  1676. bin_data.shader_name = p_shader_name.utf8();
  1677. } else {
  1678. bin_data.shader_name = "unnamed";
  1679. }
  1680. bin_data.vertex_input_mask = spirv_data.vertex_input_mask;
  1681. bin_data.fragment_output_mask = spirv_data.fragment_output_mask;
  1682. bin_data.compute_local_size = ComputeSize{
  1683. .x = spirv_data.compute_local_size[0],
  1684. .y = spirv_data.compute_local_size[1],
  1685. .z = spirv_data.compute_local_size[2],
  1686. };
  1687. bin_data.push_constant.size = spirv_data.push_constant_size;
  1688. bin_data.push_constant.stages = (ShaderStageUsage)(uint8_t)spirv_data.push_constant_stages;
  1689. bin_data.set_needs_view_mask_buffer(shader_meta.has_multiview);
  1690. for (uint32_t i = 0; i < spirv_data.uniform_sets.size(); i++) {
  1691. const ::Vector<ShaderUniform> &spirv_set = spirv_data.uniform_sets[i];
  1692. UniformSetData set(i);
  1693. for (const ShaderUniform &spirv_uniform : spirv_set) {
  1694. UniformData binding{};
  1695. binding.type = spirv_uniform.type;
  1696. binding.binding = spirv_uniform.binding;
  1697. binding.writable = spirv_uniform.writable;
  1698. binding.stages = (ShaderStageUsage)(uint8_t)spirv_uniform.stages;
  1699. binding.length = spirv_uniform.length;
  1700. set.uniforms.push_back(binding);
  1701. }
  1702. bin_data.uniforms.push_back(set);
  1703. }
  1704. for (const ShaderSpecializationConstant &spirv_sc : spirv_data.specialization_constants) {
  1705. SpecializationConstantData spec_constant{};
  1706. spec_constant.type = spirv_sc.type;
  1707. spec_constant.constant_id = spirv_sc.constant_id;
  1708. spec_constant.int_value = spirv_sc.int_value;
  1709. spec_constant.stages = (ShaderStageUsage)(uint8_t)spirv_sc.stages;
  1710. bin_data.constants.push_back(spec_constant);
  1711. bin_data.spirv_specialization_constants_ids_mask |= (1 << spirv_sc.constant_id);
  1712. }
  1713. // Reflection using SPIRV-Cross:
  1714. // https://github.com/KhronosGroup/SPIRV-Cross/wiki/Reflection-API-user-guide
  1715. CompilerMSL::Options msl_options{};
  1716. msl_options.set_msl_version(version_major, version_minor);
  1717. bin_data.msl_version = msl_options.msl_version;
  1718. #if TARGET_OS_OSX
  1719. msl_options.platform = CompilerMSL::Options::macOS;
  1720. #else
  1721. msl_options.platform = CompilerMSL::Options::iOS;
  1722. #endif
  1723. #if TARGET_OS_IPHONE
  1724. msl_options.ios_use_simdgroup_functions = (*device_properties).features.simdPermute;
  1725. msl_options.ios_support_base_vertex_instance = true;
  1726. #endif
  1727. bool disable_argument_buffers = false;
  1728. if (String v = OS::get_singleton()->get_environment(U"GODOT_DISABLE_ARGUMENT_BUFFERS"); v == U"1") {
  1729. disable_argument_buffers = true;
  1730. }
  1731. if (device_properties->features.argument_buffers_tier >= MTLArgumentBuffersTier2 && !disable_argument_buffers) {
  1732. msl_options.argument_buffers_tier = CompilerMSL::Options::ArgumentBuffersTier::Tier2;
  1733. msl_options.argument_buffers = true;
  1734. bin_data.set_uses_argument_buffers(true);
  1735. } else {
  1736. msl_options.argument_buffers_tier = CompilerMSL::Options::ArgumentBuffersTier::Tier1;
  1737. // Tier 1 argument buffers don't support writable textures, so we disable them completely.
  1738. msl_options.argument_buffers = false;
  1739. bin_data.set_uses_argument_buffers(false);
  1740. }
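// Example (hypothetical shell invocation): launching with the environment
// variable GODOT_DISABLE_ARGUMENT_BUFFERS=1 set forces the Tier 1
// (non-argument-buffer) path above, even on devices reporting
// MTLArgumentBuffersTier2.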
  1741. msl_options.force_active_argument_buffer_resources = true;
  1742. // We can't use this, as we have to add the descriptor sets via compiler.add_msl_resource_binding.
  1743. // msl_options.pad_argument_buffer_resources = true;
  1744. msl_options.texture_buffer_native = true; // Enable texture buffer support.
  1745. msl_options.use_framebuffer_fetch_subpasses = false;
  1746. msl_options.pad_fragment_output_components = true;
  1747. msl_options.r32ui_alignment_constant_id = R32UI_ALIGNMENT_CONSTANT_ID;
  1748. msl_options.agx_manual_cube_grad_fixup = true;
  1749. if (shader_meta.has_multiview) {
  1750. msl_options.multiview = true;
  1751. msl_options.multiview_layered_rendering = true;
  1752. msl_options.view_mask_buffer_index = VIEW_MASK_BUFFER_INDEX;
  1753. }
  1754. CompilerGLSL::Options options{};
  1755. options.vertex.flip_vert_y = true;
  1756. #if DEV_ENABLED
  1757. options.emit_line_directives = true;
  1758. #endif
  1759. for (uint32_t i = 0; i < p_spirv.size(); i++) {
  1760. ShaderStageSPIRVData const &v = p_spirv[i];
  1761. ShaderStage stage = v.shader_stage;
  1762. char const *stage_name = SHADER_STAGE_NAMES[stage];
  1763. uint32_t const *const ir = reinterpret_cast<uint32_t const *const>(v.spirv.ptr());
  1764. size_t word_count = v.spirv.size() / sizeof(uint32_t);
  1765. Parser parser(ir, word_count);
  1766. try {
  1767. parser.parse();
  1768. } catch (CompilerError &e) {
  1769. ERR_FAIL_V_MSG(Result(), "Failed to parse IR at stage " + String(SHADER_STAGE_NAMES[stage]) + ": " + e.what());
  1770. }
  1771. CompilerMSL compiler(std::move(parser.get_parsed_ir()));
  1772. compiler.set_msl_options(msl_options);
  1773. compiler.set_common_options(options);
  1774. std::unordered_set<VariableID> active = compiler.get_active_interface_variables();
  1775. ShaderResources resources = compiler.get_shader_resources();
  1776. std::string source;
  1777. try {
  1778. source = compiler.compile();
  1779. } catch (CompilerError &e) {
  1780. ERR_FAIL_V_MSG(Result(), "Failed to compile stage " + String(SHADER_STAGE_NAMES[stage]) + ": " + e.what());
  1781. }
  1782. ERR_FAIL_COND_V_MSG(compiler.get_entry_points_and_stages().size() != 1, Result(), "Expected a single entry point and stage.");
  1783. SmallVector<EntryPoint> entry_pts_stages = compiler.get_entry_points_and_stages();
  1784. EntryPoint &entry_point_stage = entry_pts_stages.front();
  1785. SPIREntryPoint &entry_point = compiler.get_entry_point(entry_point_stage.name, entry_point_stage.execution_model);
  1786. // Process specialization constants.
  1787. if (!compiler.get_specialization_constants().empty()) {
  1788. for (SpecializationConstant const &constant : compiler.get_specialization_constants()) {
  1789. LocalVector<SpecializationConstantData>::Iterator res = bin_data.constants.begin();
  1790. while (res != bin_data.constants.end()) {
  1791. if (res->constant_id == constant.constant_id) {
  1792. res->used_stages |= 1 << stage;
  1793. break;
  1794. }
  1795. ++res;
  1796. }
  1797. if (res == bin_data.constants.end()) {
  1798. WARN_PRINT(String(stage_name) + ": unable to find constant_id: " + itos(constant.constant_id));
  1799. }
  1800. }
  1801. }
  1802. // Process bindings.
  1803. LocalVector<UniformSetData> &uniform_sets = bin_data.uniforms;
  1804. using BT = SPIRType::BaseType;
  1805. // Always clearer than a boolean.
  1806. enum class Writable {
  1807. No,
  1808. Maybe,
  1809. };
// Returns the value of the decoration if it exists, otherwise -1 (UINT32_MAX).
  1812. auto get_decoration = [&compiler](spirv_cross::ID id, spv::Decoration decoration) {
  1813. uint32_t res = -1;
  1814. if (compiler.has_decoration(id, decoration)) {
  1815. res = compiler.get_decoration(id, decoration);
  1816. }
  1817. return res;
  1818. };
  1819. auto descriptor_bindings = [&compiler, &active, &uniform_sets, stage, &get_decoration](SmallVector<Resource> &p_resources, Writable p_writable) {
  1820. for (Resource const &res : p_resources) {
  1821. uint32_t dset = get_decoration(res.id, spv::DecorationDescriptorSet);
  1822. uint32_t dbin = get_decoration(res.id, spv::DecorationBinding);
  1823. UniformData *found = nullptr;
  1824. if (dset != (uint32_t)-1 && dbin != (uint32_t)-1 && dset < uniform_sets.size()) {
  1825. UniformSetData &set = uniform_sets[dset];
  1826. LocalVector<UniformData>::Iterator pos = set.uniforms.begin();
  1827. while (pos != set.uniforms.end()) {
  1828. if (dbin == pos->binding) {
  1829. found = &(*pos);
  1830. break;
  1831. }
  1832. ++pos;
  1833. }
  1834. }
  1835. ERR_FAIL_NULL_V_MSG(found, ERR_CANT_CREATE, "UniformData not found");
  1836. bool is_active = active.find(res.id) != active.end();
  1837. if (is_active) {
  1838. found->active_stages |= 1 << stage;
  1839. }
  1840. BindingInfo primary{};
  1841. SPIRType const &a_type = compiler.get_type(res.type_id);
  1842. BT basetype = a_type.basetype;
  1843. switch (basetype) {
  1844. case BT::Struct: {
  1845. primary.dataType = MTLDataTypePointer;
  1846. } break;
  1847. case BT::Image:
  1848. case BT::SampledImage: {
  1849. primary.dataType = MTLDataTypeTexture;
  1850. } break;
  1851. case BT::Sampler: {
  1852. primary.dataType = MTLDataTypeSampler;
  1853. primary.arrayLength = 1;
  1854. for (uint32_t const &a : a_type.array) {
  1855. primary.arrayLength *= a;
  1856. }
  1857. } break;
  1858. default: {
  1859. ERR_FAIL_V_MSG(ERR_CANT_CREATE, "Unexpected BaseType");
  1860. } break;
  1861. }
  1862. // Find array length of image.
  1863. if (basetype == BT::Image || basetype == BT::SampledImage) {
  1864. primary.arrayLength = 1;
  1865. for (uint32_t const &a : a_type.array) {
  1866. primary.arrayLength *= a;
  1867. }
  1868. primary.isMultisampled = a_type.image.ms;
  1869. SPIRType::ImageType const &image = a_type.image;
  1870. primary.imageFormat = image.format;
  1871. switch (image.dim) {
  1872. case spv::Dim1D: {
  1873. if (image.arrayed) {
  1874. primary.textureType = MTLTextureType1DArray;
  1875. } else {
  1876. primary.textureType = MTLTextureType1D;
  1877. }
  1878. } break;
  1879. case spv::DimSubpassData: {
  1880. DISPATCH_FALLTHROUGH;
  1881. }
  1882. case spv::Dim2D: {
  1883. if (image.arrayed && image.ms) {
  1884. primary.textureType = MTLTextureType2DMultisampleArray;
  1885. } else if (image.arrayed) {
  1886. primary.textureType = MTLTextureType2DArray;
  1887. } else if (image.ms) {
  1888. primary.textureType = MTLTextureType2DMultisample;
  1889. } else {
  1890. primary.textureType = MTLTextureType2D;
  1891. }
  1892. } break;
  1893. case spv::Dim3D: {
  1894. primary.textureType = MTLTextureType3D;
  1895. } break;
case spv::DimCube: {
	if (image.arrayed) {
		primary.textureType = MTLTextureTypeCubeArray;
	} else {
		primary.textureType = MTLTextureTypeCube;
	}
} break;
  1901. case spv::DimRect: {
  1902. } break;
  1903. case spv::DimBuffer: {
  1904. // VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER
  1905. primary.textureType = MTLTextureTypeTextureBuffer;
  1906. } break;
  1907. case spv::DimMax: {
// All enumeration values are listed so the switch stays exhaustive and the
// compiler warns if a new dimension is ever added.
  1910. } break;
  1911. }
  1912. }
  1913. // Update writable.
  1914. if (p_writable == Writable::Maybe) {
  1915. if (basetype == BT::Struct) {
  1916. Bitset flags = compiler.get_buffer_block_flags(res.id);
  1917. if (!flags.get(spv::DecorationNonWritable)) {
  1918. if (flags.get(spv::DecorationNonReadable)) {
  1919. primary.access = MTLBindingAccessWriteOnly;
  1920. } else {
  1921. primary.access = MTLBindingAccessReadWrite;
  1922. }
  1923. }
  1924. } else if (basetype == BT::Image) {
  1925. switch (a_type.image.access) {
  1926. case spv::AccessQualifierWriteOnly:
  1927. primary.access = MTLBindingAccessWriteOnly;
  1928. break;
  1929. case spv::AccessQualifierReadWrite:
  1930. primary.access = MTLBindingAccessReadWrite;
  1931. break;
  1932. case spv::AccessQualifierReadOnly:
  1933. break;
  1934. case spv::AccessQualifierMax:
  1935. DISPATCH_FALLTHROUGH;
  1936. default:
  1937. if (!compiler.has_decoration(res.id, spv::DecorationNonWritable)) {
  1938. if (compiler.has_decoration(res.id, spv::DecorationNonReadable)) {
  1939. primary.access = MTLBindingAccessWriteOnly;
  1940. } else {
  1941. primary.access = MTLBindingAccessReadWrite;
  1942. }
  1943. }
  1944. break;
  1945. }
  1946. }
  1947. }
  1948. switch (primary.access) {
  1949. case MTLBindingAccessReadOnly:
  1950. primary.usage = MTLResourceUsageRead;
  1951. break;
  1952. case MTLBindingAccessWriteOnly:
  1953. primary.usage = MTLResourceUsageWrite;
  1954. break;
  1955. case MTLBindingAccessReadWrite:
  1956. primary.usage = MTLResourceUsageRead | MTLResourceUsageWrite;
  1957. break;
  1958. }
  1959. primary.index = compiler.get_automatic_msl_resource_binding(res.id);
  1960. found->bindings[stage] = primary;
  1961. // A sampled image contains two bindings, the primary
  1962. // is to the image, and the secondary is to the associated sampler.
  1963. if (basetype == BT::SampledImage) {
  1964. uint32_t binding = compiler.get_automatic_msl_resource_binding_secondary(res.id);
  1965. if (binding != (uint32_t)-1) {
  1966. found->bindings_secondary[stage] = BindingInfo{
  1967. .dataType = MTLDataTypeSampler,
  1968. .index = binding,
  1969. .access = MTLBindingAccessReadOnly,
  1970. };
  1971. }
  1972. }
  1973. // An image may have a secondary binding if it is used
  1974. // for atomic operations.
  1975. if (basetype == BT::Image) {
  1976. uint32_t binding = compiler.get_automatic_msl_resource_binding_secondary(res.id);
  1977. if (binding != (uint32_t)-1) {
  1978. found->bindings_secondary[stage] = BindingInfo{
  1979. .dataType = MTLDataTypePointer,
  1980. .index = binding,
  1981. .access = MTLBindingAccessReadWrite,
  1982. };
  1983. }
  1984. }
  1985. }
  1986. return Error::OK;
  1987. };
  1988. if (!resources.uniform_buffers.empty()) {
  1989. Error err = descriptor_bindings(resources.uniform_buffers, Writable::No);
  1990. ERR_FAIL_COND_V(err != OK, Result());
  1991. }
  1992. if (!resources.storage_buffers.empty()) {
  1993. Error err = descriptor_bindings(resources.storage_buffers, Writable::Maybe);
  1994. ERR_FAIL_COND_V(err != OK, Result());
  1995. }
  1996. if (!resources.storage_images.empty()) {
  1997. Error err = descriptor_bindings(resources.storage_images, Writable::Maybe);
  1998. ERR_FAIL_COND_V(err != OK, Result());
  1999. }
  2000. if (!resources.sampled_images.empty()) {
  2001. Error err = descriptor_bindings(resources.sampled_images, Writable::No);
  2002. ERR_FAIL_COND_V(err != OK, Result());
  2003. }
  2004. if (!resources.separate_images.empty()) {
  2005. Error err = descriptor_bindings(resources.separate_images, Writable::No);
  2006. ERR_FAIL_COND_V(err != OK, Result());
  2007. }
  2008. if (!resources.separate_samplers.empty()) {
  2009. Error err = descriptor_bindings(resources.separate_samplers, Writable::No);
  2010. ERR_FAIL_COND_V(err != OK, Result());
  2011. }
  2012. if (!resources.subpass_inputs.empty()) {
  2013. Error err = descriptor_bindings(resources.subpass_inputs, Writable::No);
  2014. ERR_FAIL_COND_V(err != OK, Result());
  2015. }
  2016. if (!resources.push_constant_buffers.empty()) {
  2017. for (Resource const &res : resources.push_constant_buffers) {
  2018. uint32_t binding = compiler.get_automatic_msl_resource_binding(res.id);
  2019. if (binding != (uint32_t)-1) {
  2020. bin_data.push_constant.used_stages |= 1 << stage;
  2021. bin_data.push_constant.msl_binding[stage] = binding;
  2022. }
  2023. }
  2024. }
  2025. ERR_FAIL_COND_V_MSG(!resources.atomic_counters.empty(), Result(), "Atomic counters not supported");
  2026. ERR_FAIL_COND_V_MSG(!resources.acceleration_structures.empty(), Result(), "Acceleration structures not supported");
  2027. ERR_FAIL_COND_V_MSG(!resources.shader_record_buffers.empty(), Result(), "Shader record buffers not supported");
  2028. if (!resources.stage_inputs.empty()) {
  2029. for (Resource const &res : resources.stage_inputs) {
  2030. uint32_t binding = compiler.get_automatic_msl_resource_binding(res.id);
  2031. if (binding != (uint32_t)-1) {
  2032. bin_data.vertex_input_mask |= 1 << binding;
  2033. }
  2034. }
  2035. }
  2036. ShaderStageData stage_data;
  2037. stage_data.stage = v.shader_stage;
  2038. stage_data.is_position_invariant = compiler.is_position_invariant();
  2039. stage_data.supports_fast_math = !entry_point.flags.get(spv::ExecutionModeSignedZeroInfNanPreserve);
  2040. stage_data.entry_point_name = entry_point.name.c_str();
  2041. stage_data.source = source.c_str();
  2042. bin_data.stages.push_back(stage_data);
  2043. }
  2044. size_t vec_size = bin_data.serialize_size() + 8;
  2045. ::Vector<uint8_t> ret;
  2046. ret.resize(vec_size);
  2047. BufWriter writer(ret.ptrw(), vec_size);
  2048. const uint8_t HEADER[4] = { 'G', 'M', 'S', 'L' };
  2049. writer.write(*(uint32_t *)HEADER);
  2050. writer.write(SHADER_BINARY_VERSION);
  2051. bin_data.serialize(writer);
  2052. ret.resize(writer.get_pos());
  2053. return ret;
  2054. }
  2055. void RenderingDeviceDriverMetal::shader_cache_free_entry(const SHA256Digest &key) {
  2056. if (ShaderCacheEntry **pentry = _shader_cache.getptr(key); pentry != nullptr) {
  2057. ShaderCacheEntry *entry = *pentry;
  2058. _shader_cache.erase(key);
  2059. entry->library = nil;
  2060. memdelete(entry);
  2061. }
  2062. }
  2063. RDD::ShaderID RenderingDeviceDriverMetal::shader_create_from_bytecode(const Vector<uint8_t> &p_shader_binary, ShaderDescription &r_shader_desc, String &r_name, const Vector<ImmutableSampler> &p_immutable_samplers) {
  2064. r_shader_desc = {}; // Driver-agnostic.
  2065. const uint8_t *binptr = p_shader_binary.ptr();
  2066. uint32_t binsize = p_shader_binary.size();
  2067. BufReader reader(binptr, binsize);
  2068. uint8_t header[4];
  2069. reader.read((uint32_t &)header);
  2070. ERR_FAIL_COND_V_MSG(memcmp(header, "GMSL", 4) != 0, ShaderID(), "Invalid header");
  2071. uint32_t version = 0;
  2072. reader.read(version);
  2073. ERR_FAIL_COND_V_MSG(version != SHADER_BINARY_VERSION, ShaderID(), "Invalid shader binary version");
  2074. ShaderBinaryData binary_data;
  2075. binary_data.deserialize(reader);
  2076. switch (reader.status) {
  2077. case BufReader::Status::OK:
  2078. break;
  2079. case BufReader::Status::BAD_COMPRESSION:
  2080. ERR_FAIL_V_MSG(ShaderID(), "Invalid compressed data");
  2081. case BufReader::Status::SHORT_BUFFER:
  2082. ERR_FAIL_V_MSG(ShaderID(), "Unexpected end of buffer");
  2083. }
  2084. // We need to regenerate the shader if the cache is moved to an incompatible device.
  2085. ERR_FAIL_COND_V_MSG(device_properties->features.argument_buffers_tier < MTLArgumentBuffersTier2 && binary_data.uses_argument_buffers(),
  2086. ShaderID(),
  2087. "Shader was generated with argument buffers, but device has limited support");
  2088. MTLCompileOptions *options = [MTLCompileOptions new];
  2089. options.languageVersion = binary_data.get_msl_version();
  2090. HashMap<ShaderStage, MDLibrary *> libraries;
  2091. for (ShaderStageData &shader_data : binary_data.stages) {
  2092. SHA256Digest key = SHA256Digest(shader_data.source.ptr(), shader_data.source.length());
  2093. if (ShaderCacheEntry **p = _shader_cache.getptr(key); p != nullptr) {
  2094. libraries[shader_data.stage] = (*p)->library;
  2095. continue;
  2096. }
  2097. NSString *source = [[NSString alloc] initWithBytes:(void *)shader_data.source.ptr()
  2098. length:shader_data.source.length()
  2099. encoding:NSUTF8StringEncoding];
  2100. ShaderCacheEntry *cd = memnew(ShaderCacheEntry(*this, key));
  2101. cd->name = binary_data.shader_name;
  2102. cd->stage = shader_data.stage;
  2103. options.preserveInvariance = shader_data.is_position_invariant;
  2104. options.fastMathEnabled = YES;
  2105. MDLibrary *library = [MDLibrary newLibraryWithCacheEntry:cd
  2106. device:device
  2107. source:source
  2108. options:options
  2109. strategy:_shader_load_strategy];
  2110. _shader_cache[key] = cd;
  2111. libraries[shader_data.stage] = library;
  2112. }
  2113. Vector<UniformSet> uniform_sets;
  2114. uniform_sets.resize(binary_data.uniforms.size());
  2115. r_shader_desc.uniform_sets.resize(binary_data.uniforms.size());
  2116. // Create sets.
  2117. for (UniformSetData &uniform_set : binary_data.uniforms) {
  2118. UniformSet &set = uniform_sets.write[uniform_set.index];
  2119. set.uniforms.resize(uniform_set.uniforms.size());
  2120. Vector<ShaderUniform> &uset = r_shader_desc.uniform_sets.write[uniform_set.index];
  2121. uset.resize(uniform_set.uniforms.size());
  2122. for (uint32_t i = 0; i < uniform_set.uniforms.size(); i++) {
  2123. UniformData &uniform = uniform_set.uniforms[i];
  2124. ShaderUniform su;
  2125. su.type = uniform.type;
  2126. su.writable = uniform.writable;
  2127. su.length = uniform.length;
  2128. su.binding = uniform.binding;
  2129. su.stages = uniform.stages;
  2130. uset.write[i] = su;
  2131. UniformInfo ui;
  2132. ui.binding = uniform.binding;
  2133. ui.active_stages = uniform.active_stages;
  2134. for (KeyValue<RDC::ShaderStage, BindingInfo> &kv : uniform.bindings) {
  2135. ui.bindings.insert(kv.key, kv.value);
  2136. }
  2137. for (KeyValue<RDC::ShaderStage, BindingInfo> &kv : uniform.bindings_secondary) {
  2138. ui.bindings_secondary.insert(kv.key, kv.value);
  2139. }
  2140. set.uniforms[i] = ui;
  2141. }
  2142. }
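// Build one Metal argument encoder per stage for each set; the per-stage offsets pack the
// encoded arguments of all stages contiguously into a single argument buffer.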
  2143. for (UniformSetData &uniform_set : binary_data.uniforms) {
  2144. UniformSet &set = uniform_sets.write[uniform_set.index];
  2145. // Make encoders.
  2146. for (ShaderStageData const &stage_data : binary_data.stages) {
  2147. ShaderStage stage = stage_data.stage;
  2148. NSMutableArray<MTLArgumentDescriptor *> *descriptors = [NSMutableArray new];
  2149. for (UniformInfo const &uniform : set.uniforms) {
  2150. BindingInfo const *binding_info = uniform.bindings.getptr(stage);
  2151. if (binding_info == nullptr) {
  2152. continue;
  2153. }
  2154. [descriptors addObject:binding_info->new_argument_descriptor()];
  2155. BindingInfo const *secondary_binding_info = uniform.bindings_secondary.getptr(stage);
  2156. if (secondary_binding_info != nullptr) {
  2157. [descriptors addObject:secondary_binding_info->new_argument_descriptor()];
  2158. }
  2159. }
  2160. if (descriptors.count == 0) {
  2161. // No bindings.
  2162. continue;
  2163. }
  2164. // Sort by index.
  2165. [descriptors sortUsingComparator:^NSComparisonResult(MTLArgumentDescriptor *a, MTLArgumentDescriptor *b) {
  2166. if (a.index < b.index) {
  2167. return NSOrderedAscending;
  2168. } else if (a.index > b.index) {
  2169. return NSOrderedDescending;
  2170. } else {
  2171. return NSOrderedSame;
  2172. }
  2173. }];
  2174. id<MTLArgumentEncoder> enc = [device newArgumentEncoderWithArguments:descriptors];
  2175. set.encoders[stage] = enc;
  2176. set.offsets[stage] = set.buffer_size;
  2177. set.buffer_size += enc.encodedLength;
  2178. }
  2179. }
  2180. r_shader_desc.specialization_constants.resize(binary_data.constants.size());
  2181. for (uint32_t i = 0; i < binary_data.constants.size(); i++) {
  2182. SpecializationConstantData &c = binary_data.constants[i];
  2183. ShaderSpecializationConstant sc;
  2184. sc.type = c.type;
  2185. sc.constant_id = c.constant_id;
  2186. sc.int_value = c.int_value;
  2187. sc.stages = c.stages;
  2188. r_shader_desc.specialization_constants.write[i] = sc;
  2189. }
  2190. MDShader *shader = nullptr;
  2191. if (binary_data.is_compute()) {
  2192. MDComputeShader *cs = new MDComputeShader(
  2193. binary_data.shader_name,
  2194. uniform_sets,
  2195. binary_data.uses_argument_buffers(),
  2196. libraries[ShaderStage::SHADER_STAGE_COMPUTE]);
  2197. uint32_t *binding = binary_data.push_constant.msl_binding.getptr(SHADER_STAGE_COMPUTE);
  2198. if (binding) {
  2199. cs->push_constants.size = binary_data.push_constant.size;
  2200. cs->push_constants.binding = *binding;
  2201. }
  2202. cs->local = MTLSizeMake(binary_data.compute_local_size.x, binary_data.compute_local_size.y, binary_data.compute_local_size.z);
  2203. #if DEV_ENABLED
  2204. cs->kernel_source = binary_data.stages[0].source;
  2205. #endif
  2206. shader = cs;
  2207. } else {
  2208. MDRenderShader *rs = new MDRenderShader(
  2209. binary_data.shader_name,
  2210. uniform_sets,
  2211. binary_data.needs_view_mask_buffer(),
  2212. binary_data.uses_argument_buffers(),
  2213. libraries[ShaderStage::SHADER_STAGE_VERTEX],
  2214. libraries[ShaderStage::SHADER_STAGE_FRAGMENT]);
  2215. uint32_t *vert_binding = binary_data.push_constant.msl_binding.getptr(SHADER_STAGE_VERTEX);
  2216. if (vert_binding) {
  2217. rs->push_constants.vert.size = binary_data.push_constant.size;
  2218. rs->push_constants.vert.binding = *vert_binding;
  2219. }
  2220. uint32_t *frag_binding = binary_data.push_constant.msl_binding.getptr(SHADER_STAGE_FRAGMENT);
  2221. if (frag_binding) {
  2222. rs->push_constants.frag.size = binary_data.push_constant.size;
  2223. rs->push_constants.frag.binding = *frag_binding;
  2224. }
  2225. #if DEV_ENABLED
  2226. for (ShaderStageData &stage_data : binary_data.stages) {
  2227. if (stage_data.stage == ShaderStage::SHADER_STAGE_VERTEX) {
  2228. rs->vert_source = stage_data.source;
  2229. } else if (stage_data.stage == ShaderStage::SHADER_STAGE_FRAGMENT) {
  2230. rs->frag_source = stage_data.source;
  2231. }
  2232. }
  2233. #endif
  2234. shader = rs;
  2235. }
  2236. r_shader_desc.vertex_input_mask = binary_data.vertex_input_mask;
  2237. r_shader_desc.fragment_output_mask = binary_data.fragment_output_mask;
  2238. r_shader_desc.is_compute = binary_data.is_compute();
  2239. r_shader_desc.compute_local_size[0] = binary_data.compute_local_size.x;
  2240. r_shader_desc.compute_local_size[1] = binary_data.compute_local_size.y;
  2241. r_shader_desc.compute_local_size[2] = binary_data.compute_local_size.z;
  2242. r_shader_desc.push_constant_size = binary_data.push_constant.size;
  2243. return ShaderID(shader);
  2244. }
  2245. void RenderingDeviceDriverMetal::shader_free(ShaderID p_shader) {
  2246. MDShader *obj = (MDShader *)p_shader.id;
  2247. delete obj;
  2248. }
  2249. void RenderingDeviceDriverMetal::shader_destroy_modules(ShaderID p_shader) {
  2250. // TODO.
  2251. }
  2252. /*********************/
  2253. /**** UNIFORM SET ****/
  2254. /*********************/
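// Uniform sets are stored driver-side as a plain copy of the bound uniforms; resource
// encoding is deferred until the set is bound on a command buffer.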
  2255. RDD::UniformSetID RenderingDeviceDriverMetal::uniform_set_create(VectorView<BoundUniform> p_uniforms, ShaderID p_shader, uint32_t p_set_index, int p_linear_pool_index) {
2256. //p_linear_pool_index = -1; // TODO: Linear pools are not implemented by this backend.
  2257. MDUniformSet *set = memnew(MDUniformSet);
  2258. Vector<BoundUniform> bound_uniforms;
  2259. bound_uniforms.resize(p_uniforms.size());
  2260. for (uint32_t i = 0; i < p_uniforms.size(); i += 1) {
  2261. bound_uniforms.write[i] = p_uniforms[i];
  2262. }
  2263. set->uniforms = bound_uniforms;
  2264. set->index = p_set_index;
  2265. return UniformSetID(set);
  2266. }
  2267. void RenderingDeviceDriverMetal::uniform_set_free(UniformSetID p_uniform_set) {
  2268. MDUniformSet *obj = (MDUniformSet *)p_uniform_set.id;
  2269. memdelete(obj);
  2270. }
  2271. void RenderingDeviceDriverMetal::command_uniform_set_prepare_for_use(CommandBufferID p_cmd_buffer, UniformSetID p_uniform_set, ShaderID p_shader, uint32_t p_set_index) {
  2272. }
  2273. #pragma mark - Transfer
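// Buffer clears and copies are recorded on the command buffer's blit encoder.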
  2274. void RenderingDeviceDriverMetal::command_clear_buffer(CommandBufferID p_cmd_buffer, BufferID p_buffer, uint64_t p_offset, uint64_t p_size) {
  2275. MDCommandBuffer *cmd = (MDCommandBuffer *)(p_cmd_buffer.id);
  2276. id<MTLBuffer> buffer = rid::get(p_buffer);
  2277. id<MTLBlitCommandEncoder> blit = cmd->blit_command_encoder();
  2278. [blit fillBuffer:buffer
  2279. range:NSMakeRange(p_offset, p_size)
  2280. value:0];
  2281. }
  2282. void RenderingDeviceDriverMetal::command_copy_buffer(CommandBufferID p_cmd_buffer, BufferID p_src_buffer, BufferID p_dst_buffer, VectorView<BufferCopyRegion> p_regions) {
  2283. MDCommandBuffer *cmd = (MDCommandBuffer *)(p_cmd_buffer.id);
  2284. id<MTLBuffer> src = rid::get(p_src_buffer);
  2285. id<MTLBuffer> dst = rid::get(p_dst_buffer);
  2286. id<MTLBlitCommandEncoder> blit = cmd->blit_command_encoder();
  2287. for (uint32_t i = 0; i < p_regions.size(); i++) {
  2288. BufferCopyRegion region = p_regions[i];
  2289. [blit copyFromBuffer:src
  2290. sourceOffset:region.src_offset
  2291. toBuffer:dst
  2292. destinationOffset:region.dst_offset
  2293. size:region.size];
  2294. }
  2295. }
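// Conversion helpers between Godot's Vector3i and Metal's size/origin types.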
  2296. MTLSize MTLSizeFromVector3i(Vector3i p_size) {
  2297. return MTLSizeMake(p_size.x, p_size.y, p_size.z);
  2298. }
  2299. MTLOrigin MTLOriginFromVector3i(Vector3i p_origin) {
  2300. return MTLOriginMake(p_origin.x, p_origin.y, p_origin.z);
  2301. }
  2302. // Clamps the size so that the sum of the origin and size do not exceed the maximum size.
  2303. static inline MTLSize clampMTLSize(MTLSize p_size, MTLOrigin p_origin, MTLSize p_max_size) {
  2304. MTLSize clamped;
  2305. clamped.width = MIN(p_size.width, p_max_size.width - p_origin.x);
  2306. clamped.height = MIN(p_size.height, p_max_size.height - p_origin.y);
  2307. clamped.depth = MIN(p_size.depth, p_max_size.depth - p_origin.z);
  2308. return clamped;
  2309. }
  2310. void RenderingDeviceDriverMetal::command_copy_texture(CommandBufferID p_cmd_buffer, TextureID p_src_texture, TextureLayout p_src_texture_layout, TextureID p_dst_texture, TextureLayout p_dst_texture_layout, VectorView<TextureCopyRegion> p_regions) {
  2311. MDCommandBuffer *cmd = (MDCommandBuffer *)(p_cmd_buffer.id);
  2312. id<MTLTexture> src = rid::get(p_src_texture);
  2313. id<MTLTexture> dst = rid::get(p_dst_texture);
  2314. id<MTLBlitCommandEncoder> blit = cmd->blit_command_encoder();
  2315. PixelFormats &pf = *pixel_formats;
  2316. MTLPixelFormat src_fmt = src.pixelFormat;
  2317. bool src_is_compressed = pf.getFormatType(src_fmt) == MTLFormatType::Compressed;
  2318. MTLPixelFormat dst_fmt = dst.pixelFormat;
  2319. bool dst_is_compressed = pf.getFormatType(dst_fmt) == MTLFormatType::Compressed;
  2320. // Validate copy.
  2321. if (src.sampleCount != dst.sampleCount || pf.getBytesPerBlock(src_fmt) != pf.getBytesPerBlock(dst_fmt)) {
  2322. ERR_FAIL_MSG("Cannot copy between incompatible pixel formats, such as formats of different pixel sizes, or between images with different sample counts.");
  2323. }
  2324. // If source and destination have different formats and at least one is compressed, a temporary buffer is required.
  2325. bool need_tmp_buffer = (src_fmt != dst_fmt) && (src_is_compressed || dst_is_compressed);
  2326. if (need_tmp_buffer) {
  2327. ERR_FAIL_MSG("not implemented: copy with intermediate buffer");
  2328. }
  2329. if (src_fmt != dst_fmt) {
  2330. // Map the source pixel format to the dst through a texture view on the source texture.
  2331. src = [src newTextureViewWithPixelFormat:dst_fmt];
  2332. }
  2333. for (uint32_t i = 0; i < p_regions.size(); i++) {
  2334. TextureCopyRegion region = p_regions[i];
  2335. MTLSize extent = MTLSizeFromVector3i(region.size);
  2336. // If copies can be performed using direct texture-texture copying, do so.
  2337. uint32_t src_level = region.src_subresources.mipmap;
  2338. uint32_t src_base_layer = region.src_subresources.base_layer;
  2339. MTLSize src_extent = mipmapLevelSizeFromTexture(src, src_level);
  2340. uint32_t dst_level = region.dst_subresources.mipmap;
  2341. uint32_t dst_base_layer = region.dst_subresources.base_layer;
  2342. MTLSize dst_extent = mipmapLevelSizeFromTexture(dst, dst_level);
2343. // All layers may be copied at once if the extent completely covers both images.
  2344. if (src_extent == extent && dst_extent == extent) {
  2345. [blit copyFromTexture:src
  2346. sourceSlice:src_base_layer
  2347. sourceLevel:src_level
  2348. toTexture:dst
  2349. destinationSlice:dst_base_layer
  2350. destinationLevel:dst_level
  2351. sliceCount:region.src_subresources.layer_count
  2352. levelCount:1];
  2353. } else {
  2354. MTLOrigin src_origin = MTLOriginFromVector3i(region.src_offset);
  2355. MTLSize src_size = clampMTLSize(extent, src_origin, src_extent);
  2356. uint32_t layer_count = 0;
  2357. if ((src.textureType == MTLTextureType3D) != (dst.textureType == MTLTextureType3D)) {
2358. // In this case, the number of layers to copy is in extent.depth. Use that value,
  2359. // then clamp the depth, so we don't try to copy more than Metal will allow.
  2360. layer_count = extent.depth;
  2361. src_size.depth = 1;
  2362. } else {
  2363. layer_count = region.src_subresources.layer_count;
  2364. }
  2365. MTLOrigin dst_origin = MTLOriginFromVector3i(region.dst_offset);
  2366. for (uint32_t layer = 0; layer < layer_count; layer++) {
  2367. // We can copy between a 3D and a 2D image easily. Just copy between
  2368. // one slice of the 2D image and one plane of the 3D image at a time.
  2369. if ((src.textureType == MTLTextureType3D) == (dst.textureType == MTLTextureType3D)) {
  2370. [blit copyFromTexture:src
  2371. sourceSlice:src_base_layer + layer
  2372. sourceLevel:src_level
  2373. sourceOrigin:src_origin
  2374. sourceSize:src_size
  2375. toTexture:dst
  2376. destinationSlice:dst_base_layer + layer
  2377. destinationLevel:dst_level
  2378. destinationOrigin:dst_origin];
  2379. } else if (src.textureType == MTLTextureType3D) {
  2380. [blit copyFromTexture:src
  2381. sourceSlice:src_base_layer
  2382. sourceLevel:src_level
  2383. sourceOrigin:MTLOriginMake(src_origin.x, src_origin.y, src_origin.z + layer)
  2384. sourceSize:src_size
  2385. toTexture:dst
  2386. destinationSlice:dst_base_layer + layer
  2387. destinationLevel:dst_level
  2388. destinationOrigin:dst_origin];
  2389. } else {
  2390. DEV_ASSERT(dst.textureType == MTLTextureType3D);
  2391. [blit copyFromTexture:src
  2392. sourceSlice:src_base_layer + layer
  2393. sourceLevel:src_level
  2394. sourceOrigin:src_origin
  2395. sourceSize:src_size
  2396. toTexture:dst
  2397. destinationSlice:dst_base_layer
  2398. destinationLevel:dst_level
  2399. destinationOrigin:MTLOriginMake(dst_origin.x, dst_origin.y, dst_origin.z + layer)];
  2400. }
  2401. }
  2402. }
  2403. }
  2404. }
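// MSAA resolves are performed with an empty render pass: the color attachment loads the
// multisampled source and stores with a multisample-resolve action into the destination.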
  2405. void RenderingDeviceDriverMetal::command_resolve_texture(CommandBufferID p_cmd_buffer, TextureID p_src_texture, TextureLayout p_src_texture_layout, uint32_t p_src_layer, uint32_t p_src_mipmap, TextureID p_dst_texture, TextureLayout p_dst_texture_layout, uint32_t p_dst_layer, uint32_t p_dst_mipmap) {
  2406. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2407. id<MTLTexture> src_tex = rid::get(p_src_texture);
  2408. id<MTLTexture> dst_tex = rid::get(p_dst_texture);
  2409. MTLRenderPassDescriptor *mtlRPD = [MTLRenderPassDescriptor renderPassDescriptor];
  2410. MTLRenderPassColorAttachmentDescriptor *mtlColorAttDesc = mtlRPD.colorAttachments[0];
  2411. mtlColorAttDesc.loadAction = MTLLoadActionLoad;
  2412. mtlColorAttDesc.storeAction = MTLStoreActionMultisampleResolve;
  2413. mtlColorAttDesc.texture = src_tex;
  2414. mtlColorAttDesc.resolveTexture = dst_tex;
  2415. mtlColorAttDesc.level = p_src_mipmap;
  2416. mtlColorAttDesc.slice = p_src_layer;
  2417. mtlColorAttDesc.resolveLevel = p_dst_mipmap;
  2418. mtlColorAttDesc.resolveSlice = p_dst_layer;
  2419. cb->encodeRenderCommandEncoderWithDescriptor(mtlRPD, @"Resolve Image");
  2420. }
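// Color clears are likewise implemented as empty render passes with a clear load action.
// When layered rendering is available, all slices of a mip level are cleared in one pass;
// otherwise each layer is cleared individually.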
  2421. void RenderingDeviceDriverMetal::command_clear_color_texture(CommandBufferID p_cmd_buffer, TextureID p_texture, TextureLayout p_texture_layout, const Color &p_color, const TextureSubresourceRange &p_subresources) {
  2422. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2423. id<MTLTexture> src_tex = rid::get(p_texture);
  2424. if (src_tex.parentTexture) {
  2425. // Clear via the parent texture rather than the view.
  2426. src_tex = src_tex.parentTexture;
  2427. }
  2428. PixelFormats &pf = *pixel_formats;
  2429. if (pf.isDepthFormat(src_tex.pixelFormat) || pf.isStencilFormat(src_tex.pixelFormat)) {
  2430. ERR_FAIL_MSG("invalid: depth or stencil texture format");
  2431. }
  2432. MTLRenderPassDescriptor *desc = MTLRenderPassDescriptor.renderPassDescriptor;
  2433. if (p_subresources.aspect.has_flag(TEXTURE_ASPECT_COLOR_BIT)) {
  2434. MTLRenderPassColorAttachmentDescriptor *caDesc = desc.colorAttachments[0];
  2435. caDesc.texture = src_tex;
  2436. caDesc.loadAction = MTLLoadActionClear;
  2437. caDesc.storeAction = MTLStoreActionStore;
  2438. caDesc.clearColor = MTLClearColorMake(p_color.r, p_color.g, p_color.b, p_color.a);
  2439. // Extract the mipmap levels that are to be updated.
  2440. uint32_t mipLvlStart = p_subresources.base_mipmap;
  2441. uint32_t mipLvlCnt = p_subresources.mipmap_count;
  2442. uint32_t mipLvlEnd = mipLvlStart + mipLvlCnt;
  2443. uint32_t levelCount = src_tex.mipmapLevelCount;
  2444. // Extract the cube or array layers (slices) that are to be updated.
  2445. bool is3D = src_tex.textureType == MTLTextureType3D;
  2446. uint32_t layerStart = is3D ? 0 : p_subresources.base_layer;
  2447. uint32_t layerCnt = p_subresources.layer_count;
  2448. uint32_t layerEnd = layerStart + layerCnt;
  2449. MetalFeatures const &features = (*device_properties).features;
2450. // Iterate across mipmap levels and layers, and perform an empty render to clear each.
  2451. for (uint32_t mipLvl = mipLvlStart; mipLvl < mipLvlEnd; mipLvl++) {
  2452. ERR_FAIL_INDEX_MSG(mipLvl, levelCount, "mip level out of range");
  2453. caDesc.level = mipLvl;
2454. // For a 3D image, the layer count is the depth of the current mipmap level.
  2455. if (is3D) {
  2456. layerCnt = mipmapLevelSizeFromTexture(src_tex, mipLvl).depth;
  2457. layerEnd = layerStart + layerCnt;
  2458. }
  2459. if ((features.layeredRendering && src_tex.sampleCount == 1) || features.multisampleLayeredRendering) {
  2460. // We can clear all layers at once.
  2461. if (is3D) {
  2462. caDesc.depthPlane = layerStart;
  2463. } else {
  2464. caDesc.slice = layerStart;
  2465. }
  2466. desc.renderTargetArrayLength = layerCnt;
  2467. cb->encodeRenderCommandEncoderWithDescriptor(desc, @"Clear Image");
  2468. } else {
  2469. for (uint32_t layer = layerStart; layer < layerEnd; layer++) {
  2470. if (is3D) {
  2471. caDesc.depthPlane = layer;
  2472. } else {
  2473. caDesc.slice = layer;
  2474. }
  2475. cb->encodeRenderCommandEncoderWithDescriptor(desc, @"Clear Image");
  2476. }
  2477. }
  2478. }
  2479. }
  2480. }
  2481. API_AVAILABLE(macos(11.0), ios(14.0), tvos(14.0))
  2482. bool isArrayTexture(MTLTextureType p_type) {
  2483. return (p_type == MTLTextureType3D ||
  2484. p_type == MTLTextureType2DArray ||
  2485. p_type == MTLTextureType2DMultisampleArray ||
  2486. p_type == MTLTextureType1DArray);
  2487. }
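// Shared implementation for buffer <-> texture copies. Handles PVRTC row-linear layout,
// depth/stencil plane selection via blit options, and per-layer buffer offsets.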
  2488. void RenderingDeviceDriverMetal::_copy_texture_buffer(CommandBufferID p_cmd_buffer,
  2489. CopySource p_source,
  2490. TextureID p_texture,
  2491. BufferID p_buffer,
  2492. VectorView<BufferTextureCopyRegion> p_regions) {
  2493. MDCommandBuffer *cmd = (MDCommandBuffer *)(p_cmd_buffer.id);
  2494. id<MTLBuffer> buffer = rid::get(p_buffer);
  2495. id<MTLTexture> texture = rid::get(p_texture);
  2496. id<MTLBlitCommandEncoder> enc = cmd->blit_command_encoder();
  2497. PixelFormats &pf = *pixel_formats;
  2498. MTLPixelFormat mtlPixFmt = texture.pixelFormat;
  2499. MTLBlitOption options = MTLBlitOptionNone;
  2500. if (pf.isPVRTCFormat(mtlPixFmt)) {
  2501. options |= MTLBlitOptionRowLinearPVRTC;
  2502. }
  2503. for (uint32_t i = 0; i < p_regions.size(); i++) {
  2504. BufferTextureCopyRegion region = p_regions[i];
  2505. uint32_t mip_level = region.texture_subresources.mipmap;
  2506. MTLOrigin txt_origin = MTLOriginMake(region.texture_offset.x, region.texture_offset.y, region.texture_offset.z);
  2507. MTLSize src_extent = mipmapLevelSizeFromTexture(texture, mip_level);
  2508. MTLSize txt_size = clampMTLSize(MTLSizeMake(region.texture_region_size.x, region.texture_region_size.y, region.texture_region_size.z),
  2509. txt_origin,
  2510. src_extent);
  2511. uint32_t buffImgWd = region.texture_region_size.x;
  2512. uint32_t buffImgHt = region.texture_region_size.y;
  2513. NSUInteger bytesPerRow = pf.getBytesPerRow(mtlPixFmt, buffImgWd);
  2514. NSUInteger bytesPerImg = pf.getBytesPerLayer(mtlPixFmt, bytesPerRow, buffImgHt);
  2515. MTLBlitOption blit_options = options;
  2516. if (pf.isDepthFormat(mtlPixFmt) && pf.isStencilFormat(mtlPixFmt)) {
  2517. bool want_depth = flags::all(region.texture_subresources.aspect, TEXTURE_ASPECT_DEPTH_BIT);
  2518. bool want_stencil = flags::all(region.texture_subresources.aspect, TEXTURE_ASPECT_STENCIL_BIT);
  2519. // The stencil component is always 1 byte per pixel.
  2520. // Don't reduce depths of 32-bit depth/stencil formats.
  2521. if (want_depth && !want_stencil) {
  2522. if (pf.getBytesPerTexel(mtlPixFmt) != 4) {
  2523. bytesPerRow -= buffImgWd;
  2524. bytesPerImg -= buffImgWd * buffImgHt;
  2525. }
  2526. blit_options |= MTLBlitOptionDepthFromDepthStencil;
  2527. } else if (want_stencil && !want_depth) {
  2528. bytesPerRow = buffImgWd;
  2529. bytesPerImg = buffImgWd * buffImgHt;
  2530. blit_options |= MTLBlitOptionStencilFromDepthStencil;
  2531. }
  2532. }
  2533. if (!isArrayTexture(texture.textureType)) {
  2534. bytesPerImg = 0;
  2535. }
  2536. if (p_source == CopySource::Buffer) {
  2537. for (uint32_t lyrIdx = 0; lyrIdx < region.texture_subresources.layer_count; lyrIdx++) {
  2538. [enc copyFromBuffer:buffer
  2539. sourceOffset:region.buffer_offset + (bytesPerImg * lyrIdx)
  2540. sourceBytesPerRow:bytesPerRow
  2541. sourceBytesPerImage:bytesPerImg
  2542. sourceSize:txt_size
  2543. toTexture:texture
  2544. destinationSlice:region.texture_subresources.base_layer + lyrIdx
  2545. destinationLevel:mip_level
  2546. destinationOrigin:txt_origin
  2547. options:blit_options];
  2548. }
  2549. } else {
  2550. for (uint32_t lyrIdx = 0; lyrIdx < region.texture_subresources.layer_count; lyrIdx++) {
  2551. [enc copyFromTexture:texture
  2552. sourceSlice:region.texture_subresources.base_layer + lyrIdx
  2553. sourceLevel:mip_level
  2554. sourceOrigin:txt_origin
  2555. sourceSize:txt_size
  2556. toBuffer:buffer
  2557. destinationOffset:region.buffer_offset + (bytesPerImg * lyrIdx)
  2558. destinationBytesPerRow:bytesPerRow
  2559. destinationBytesPerImage:bytesPerImg
  2560. options:blit_options];
  2561. }
  2562. }
  2563. }
  2564. }
  2565. void RenderingDeviceDriverMetal::command_copy_buffer_to_texture(CommandBufferID p_cmd_buffer, BufferID p_src_buffer, TextureID p_dst_texture, TextureLayout p_dst_texture_layout, VectorView<BufferTextureCopyRegion> p_regions) {
  2566. _copy_texture_buffer(p_cmd_buffer, CopySource::Buffer, p_dst_texture, p_src_buffer, p_regions);
  2567. }
  2568. void RenderingDeviceDriverMetal::command_copy_texture_to_buffer(CommandBufferID p_cmd_buffer, TextureID p_src_texture, TextureLayout p_src_texture_layout, BufferID p_dst_buffer, VectorView<BufferTextureCopyRegion> p_regions) {
  2569. _copy_texture_buffer(p_cmd_buffer, CopySource::Texture, p_src_texture, p_dst_buffer, p_regions);
  2570. }
  2571. #pragma mark - Pipeline
  2572. void RenderingDeviceDriverMetal::pipeline_free(PipelineID p_pipeline_id) {
  2573. MDPipeline *obj = (MDPipeline *)(p_pipeline_id.id);
  2574. delete obj;
  2575. }
  2576. // ----- BINDING -----
  2577. void RenderingDeviceDriverMetal::command_bind_push_constants(CommandBufferID p_cmd_buffer, ShaderID p_shader, uint32_t p_dst_first_index, VectorView<uint32_t> p_data) {
  2578. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2579. MDShader *shader = (MDShader *)(p_shader.id);
  2580. shader->encode_push_constant_data(p_data, cb);
  2581. }
  2582. // ----- CACHE -----
  2583. String RenderingDeviceDriverMetal::_pipeline_get_cache_path() const {
  2584. String path = OS::get_singleton()->get_user_data_dir() + "/metal/pipelines";
  2585. path += "." + context_device.name.validate_filename().replace(" ", "_").to_lower();
  2586. if (Engine::get_singleton()->is_editor_hint()) {
  2587. path += ".editor";
  2588. }
  2589. path += ".cache";
  2590. return path;
  2591. }
  2592. bool RenderingDeviceDriverMetal::pipeline_cache_create(const Vector<uint8_t> &p_data) {
  2593. return false;
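// NOTE: The early return above disables the MTLBinaryArchive-backed pipeline cache;
// the archive setup below is currently unreachable.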
  2594. CharString path = _pipeline_get_cache_path().utf8();
  2595. NSString *nPath = [[NSString alloc] initWithBytesNoCopy:path.ptrw()
  2596. length:path.length()
  2597. encoding:NSUTF8StringEncoding
  2598. freeWhenDone:NO];
  2599. MTLBinaryArchiveDescriptor *desc = [MTLBinaryArchiveDescriptor new];
  2600. if ([[NSFileManager defaultManager] fileExistsAtPath:nPath]) {
  2601. desc.url = [NSURL fileURLWithPath:nPath];
  2602. }
  2603. NSError *error = nil;
  2604. archive = [device newBinaryArchiveWithDescriptor:desc error:&error];
  2605. return true;
  2606. }
  2607. void RenderingDeviceDriverMetal::pipeline_cache_free() {
  2608. archive = nil;
  2609. }
  2610. size_t RenderingDeviceDriverMetal::pipeline_cache_query_size() {
  2611. return archive_count * 1024;
  2612. }
  2613. Vector<uint8_t> RenderingDeviceDriverMetal::pipeline_cache_serialize() {
  2614. if (!archive) {
  2615. return Vector<uint8_t>();
  2616. }
  2617. CharString path = _pipeline_get_cache_path().utf8();
  2618. NSString *nPath = [[NSString alloc] initWithBytesNoCopy:path.ptrw()
  2619. length:path.length()
  2620. encoding:NSUTF8StringEncoding
  2621. freeWhenDone:NO];
  2622. NSURL *target = [NSURL fileURLWithPath:nPath];
  2623. NSError *error = nil;
  2624. if ([archive serializeToURL:target error:&error]) {
  2625. return Vector<uint8_t>();
  2626. } else {
  2627. print_line(error.localizedDescription.UTF8String);
  2628. return Vector<uint8_t>();
  2629. }
  2630. }
  2631. #pragma mark - Rendering
  2632. // ----- SUBPASS -----
  2633. RDD::RenderPassID RenderingDeviceDriverMetal::render_pass_create(VectorView<Attachment> p_attachments, VectorView<Subpass> p_subpasses, VectorView<SubpassDependency> p_subpass_dependencies, uint32_t p_view_count) {
  2634. PixelFormats &pf = *pixel_formats;
  2635. size_t subpass_count = p_subpasses.size();
  2636. Vector<MDSubpass> subpasses;
  2637. subpasses.resize(subpass_count);
  2638. for (uint32_t i = 0; i < subpass_count; i++) {
  2639. MDSubpass &subpass = subpasses.write[i];
  2640. subpass.subpass_index = i;
  2641. subpass.view_count = p_view_count;
  2642. subpass.input_references = p_subpasses[i].input_references;
  2643. subpass.color_references = p_subpasses[i].color_references;
  2644. subpass.depth_stencil_reference = p_subpasses[i].depth_stencil_reference;
  2645. subpass.resolve_references = p_subpasses[i].resolve_references;
  2646. }
  2647. static const MTLLoadAction LOAD_ACTIONS[] = {
  2648. [ATTACHMENT_LOAD_OP_LOAD] = MTLLoadActionLoad,
  2649. [ATTACHMENT_LOAD_OP_CLEAR] = MTLLoadActionClear,
  2650. [ATTACHMENT_LOAD_OP_DONT_CARE] = MTLLoadActionDontCare,
  2651. };
  2652. static const MTLStoreAction STORE_ACTIONS[] = {
  2653. [ATTACHMENT_STORE_OP_STORE] = MTLStoreActionStore,
  2654. [ATTACHMENT_STORE_OP_DONT_CARE] = MTLStoreActionDontCare,
  2655. };
  2656. Vector<MDAttachment> attachments;
  2657. attachments.resize(p_attachments.size());
  2658. for (uint32_t i = 0; i < p_attachments.size(); i++) {
  2659. Attachment const &a = p_attachments[i];
  2660. MDAttachment &mda = attachments.write[i];
  2661. MTLPixelFormat format = pf.getMTLPixelFormat(a.format);
  2662. mda.format = format;
  2663. if (a.samples > TEXTURE_SAMPLES_1) {
  2664. mda.samples = (*device_properties).find_nearest_supported_sample_count(a.samples);
  2665. }
  2666. mda.loadAction = LOAD_ACTIONS[a.load_op];
  2667. mda.storeAction = STORE_ACTIONS[a.store_op];
  2668. bool is_depth = pf.isDepthFormat(format);
  2669. if (is_depth) {
  2670. mda.type |= MDAttachmentType::Depth;
  2671. }
  2672. bool is_stencil = pf.isStencilFormat(format);
  2673. if (is_stencil) {
  2674. mda.type |= MDAttachmentType::Stencil;
  2675. mda.stencilLoadAction = LOAD_ACTIONS[a.stencil_load_op];
  2676. mda.stencilStoreAction = STORE_ACTIONS[a.stencil_store_op];
  2677. }
  2678. if (!is_depth && !is_stencil) {
  2679. mda.type |= MDAttachmentType::Color;
  2680. }
  2681. }
  2682. MDRenderPass *obj = new MDRenderPass(attachments, subpasses);
  2683. return RenderPassID(obj);
  2684. }
  2685. void RenderingDeviceDriverMetal::render_pass_free(RenderPassID p_render_pass) {
  2686. MDRenderPass *obj = (MDRenderPass *)(p_render_pass.id);
  2687. delete obj;
  2688. }
  2689. // ----- COMMANDS -----
  2690. void RenderingDeviceDriverMetal::command_begin_render_pass(CommandBufferID p_cmd_buffer, RenderPassID p_render_pass, FramebufferID p_framebuffer, CommandBufferType p_cmd_buffer_type, const Rect2i &p_rect, VectorView<RenderPassClearValue> p_clear_values) {
  2691. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2692. cb->render_begin_pass(p_render_pass, p_framebuffer, p_cmd_buffer_type, p_rect, p_clear_values);
  2693. }
  2694. void RenderingDeviceDriverMetal::command_end_render_pass(CommandBufferID p_cmd_buffer) {
  2695. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2696. cb->render_end_pass();
  2697. }
  2698. void RenderingDeviceDriverMetal::command_next_render_subpass(CommandBufferID p_cmd_buffer, CommandBufferType p_cmd_buffer_type) {
  2699. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2700. cb->render_next_subpass();
  2701. }
  2702. void RenderingDeviceDriverMetal::command_render_set_viewport(CommandBufferID p_cmd_buffer, VectorView<Rect2i> p_viewports) {
  2703. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2704. cb->render_set_viewport(p_viewports);
  2705. }
  2706. void RenderingDeviceDriverMetal::command_render_set_scissor(CommandBufferID p_cmd_buffer, VectorView<Rect2i> p_scissors) {
  2707. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2708. cb->render_set_scissor(p_scissors);
  2709. }
  2710. void RenderingDeviceDriverMetal::command_render_clear_attachments(CommandBufferID p_cmd_buffer, VectorView<AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) {
  2711. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2712. cb->render_clear_attachments(p_attachment_clears, p_rects);
  2713. }
  2714. void RenderingDeviceDriverMetal::command_bind_render_pipeline(CommandBufferID p_cmd_buffer, PipelineID p_pipeline) {
  2715. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2716. cb->bind_pipeline(p_pipeline);
  2717. }
  2718. void RenderingDeviceDriverMetal::command_bind_render_uniform_set(CommandBufferID p_cmd_buffer, UniformSetID p_uniform_set, ShaderID p_shader, uint32_t p_set_index) {
  2719. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2720. cb->render_bind_uniform_set(p_uniform_set, p_shader, p_set_index);
  2721. }
  2722. void RenderingDeviceDriverMetal::command_bind_render_uniform_sets(CommandBufferID p_cmd_buffer, VectorView<UniformSetID> p_uniform_sets, ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
  2723. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2724. cb->render_bind_uniform_sets(p_uniform_sets, p_shader, p_first_set_index, p_set_count);
  2725. }
  2726. void RenderingDeviceDriverMetal::command_render_draw(CommandBufferID p_cmd_buffer, uint32_t p_vertex_count, uint32_t p_instance_count, uint32_t p_base_vertex, uint32_t p_first_instance) {
  2727. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2728. cb->render_draw(p_vertex_count, p_instance_count, p_base_vertex, p_first_instance);
  2729. }
  2730. void RenderingDeviceDriverMetal::command_render_draw_indexed(CommandBufferID p_cmd_buffer, uint32_t p_index_count, uint32_t p_instance_count, uint32_t p_first_index, int32_t p_vertex_offset, uint32_t p_first_instance) {
  2731. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2732. cb->render_draw_indexed(p_index_count, p_instance_count, p_first_index, p_vertex_offset, p_first_instance);
  2733. }
  2734. void RenderingDeviceDriverMetal::command_render_draw_indexed_indirect(CommandBufferID p_cmd_buffer, BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
  2735. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2736. cb->render_draw_indexed_indirect(p_indirect_buffer, p_offset, p_draw_count, p_stride);
  2737. }
  2738. void RenderingDeviceDriverMetal::command_render_draw_indexed_indirect_count(CommandBufferID p_cmd_buffer, BufferID p_indirect_buffer, uint64_t p_offset, BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
  2739. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2740. cb->render_draw_indexed_indirect_count(p_indirect_buffer, p_offset, p_count_buffer, p_count_buffer_offset, p_max_draw_count, p_stride);
  2741. }
  2742. void RenderingDeviceDriverMetal::command_render_draw_indirect(CommandBufferID p_cmd_buffer, BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
  2743. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2744. cb->render_draw_indirect(p_indirect_buffer, p_offset, p_draw_count, p_stride);
  2745. }
  2746. void RenderingDeviceDriverMetal::command_render_draw_indirect_count(CommandBufferID p_cmd_buffer, BufferID p_indirect_buffer, uint64_t p_offset, BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
  2747. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2748. cb->render_draw_indirect_count(p_indirect_buffer, p_offset, p_count_buffer, p_count_buffer_offset, p_max_draw_count, p_stride);
  2749. }
  2750. void RenderingDeviceDriverMetal::command_render_bind_vertex_buffers(CommandBufferID p_cmd_buffer, uint32_t p_binding_count, const BufferID *p_buffers, const uint64_t *p_offsets) {
  2751. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2752. cb->render_bind_vertex_buffers(p_binding_count, p_buffers, p_offsets);
  2753. }
  2754. void RenderingDeviceDriverMetal::command_render_bind_index_buffer(CommandBufferID p_cmd_buffer, BufferID p_buffer, IndexBufferFormat p_format, uint64_t p_offset) {
  2755. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2756. cb->render_bind_index_buffer(p_buffer, p_format, p_offset);
  2757. }
  2758. void RenderingDeviceDriverMetal::command_render_set_blend_constants(CommandBufferID p_cmd_buffer, const Color &p_constants) {
  2759. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  2760. cb->render_set_blend_constants(p_constants);
  2761. }
  2762. void RenderingDeviceDriverMetal::command_render_set_line_width(CommandBufferID p_cmd_buffer, float p_width) {
  2763. if (!Math::is_equal_approx(p_width, 1.0f)) {
  2764. ERR_FAIL_MSG("Setting line widths other than 1.0 is not supported by the Metal rendering driver.");
  2765. }
  2766. }
  2767. // ----- PIPELINE -----
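// Looks up the entry point in the compiled library and, when the function declares
// function constants, binds the matching specialization constant values (both lists are
// walked in sorted order) before creating the specialized function.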
  2768. RenderingDeviceDriverMetal::Result<id<MTLFunction>> RenderingDeviceDriverMetal::_create_function(MDLibrary *p_library, NSString *p_name, VectorView<PipelineSpecializationConstant> &p_specialization_constants) {
  2769. id<MTLLibrary> library = p_library.library;
  2770. if (!library) {
  2771. ERR_FAIL_V_MSG(ERR_CANT_CREATE, "Failed to compile Metal library");
  2772. }
  2773. id<MTLFunction> function = [library newFunctionWithName:p_name];
  2774. ERR_FAIL_NULL_V_MSG(function, ERR_CANT_CREATE, "No function named main0");
  2775. if (function.functionConstantsDictionary.count == 0) {
  2776. return function;
  2777. }
  2778. NSArray<MTLFunctionConstant *> *constants = function.functionConstantsDictionary.allValues;
  2779. bool is_sorted = true;
  2780. for (uint32_t i = 1; i < constants.count; i++) {
  2781. if (constants[i - 1].index > constants[i].index) {
  2782. is_sorted = false;
  2783. break;
  2784. }
  2785. }
  2786. if (!is_sorted) {
  2787. constants = [constants sortedArrayUsingComparator:^NSComparisonResult(MTLFunctionConstant *a, MTLFunctionConstant *b) {
  2788. if (a.index < b.index) {
  2789. return NSOrderedAscending;
  2790. } else if (a.index > b.index) {
  2791. return NSOrderedDescending;
  2792. } else {
  2793. return NSOrderedSame;
  2794. }
  2795. }];
  2796. }
2797. // Initialize an array of indices into p_specialization_constants.
  2798. uint32_t *indexes = (uint32_t *)alloca(p_specialization_constants.size() * sizeof(uint32_t));
  2799. for (uint32_t i = 0; i < p_specialization_constants.size(); i++) {
  2800. indexes[i] = i;
  2801. }
2802. // Sort the indices by constant_id so they can be merged with the sorted Metal function constants.
  2803. std::sort(indexes, &indexes[p_specialization_constants.size()], [&](int a, int b) {
  2804. return p_specialization_constants[a].constant_id < p_specialization_constants[b].constant_id;
  2805. });
  2806. MTLFunctionConstantValues *constantValues = [MTLFunctionConstantValues new];
  2807. uint32_t i = 0;
  2808. uint32_t j = 0;
  2809. while (i < constants.count && j < p_specialization_constants.size()) {
  2810. MTLFunctionConstant *curr = constants[i];
  2811. PipelineSpecializationConstant const &sc = p_specialization_constants[indexes[j]];
  2812. if (curr.index == sc.constant_id) {
  2813. switch (curr.type) {
  2814. case MTLDataTypeBool:
  2815. case MTLDataTypeFloat:
  2816. case MTLDataTypeInt:
  2817. case MTLDataTypeUInt: {
  2818. [constantValues setConstantValue:&sc.int_value
  2819. type:curr.type
  2820. atIndex:sc.constant_id];
  2821. } break;
  2822. default:
  2823. ERR_FAIL_V_MSG(function, "Invalid specialization constant type");
  2824. }
  2825. i++;
  2826. j++;
  2827. } else if (curr.index < sc.constant_id) {
  2828. i++;
  2829. } else {
  2830. j++;
  2831. }
  2832. }
  2833. if (i != constants.count) {
  2834. MTLFunctionConstant *curr = constants[i];
  2835. if (curr.index == R32UI_ALIGNMENT_CONSTANT_ID) {
  2836. uint32_t alignment = 16; // TODO(sgc): is this always correct?
  2837. [constantValues setConstantValue:&alignment
  2838. type:curr.type
  2839. atIndex:curr.index];
  2840. i++;
  2841. }
  2842. }
  2843. NSError *err = nil;
  2844. function = [library newFunctionWithName:@"main0"
  2845. constantValues:constantValues
  2846. error:&err];
  2847. ERR_FAIL_NULL_V_MSG(function, ERR_CANT_CREATE, String("specialized function failed: ") + err.localizedDescription.UTF8String);
  2848. return function;
  2849. }
  2850. // RDD::PolygonCullMode == MTLCullMode.
  2851. static_assert(ENUM_MEMBERS_EQUAL(RDD::POLYGON_CULL_DISABLED, MTLCullModeNone));
  2852. static_assert(ENUM_MEMBERS_EQUAL(RDD::POLYGON_CULL_FRONT, MTLCullModeFront));
  2853. static_assert(ENUM_MEMBERS_EQUAL(RDD::POLYGON_CULL_BACK, MTLCullModeBack));
  2854. // RDD::StencilOperation == MTLStencilOperation.
  2855. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_KEEP, MTLStencilOperationKeep));
  2856. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_ZERO, MTLStencilOperationZero));
  2857. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_REPLACE, MTLStencilOperationReplace));
  2858. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_INCREMENT_AND_CLAMP, MTLStencilOperationIncrementClamp));
  2859. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_DECREMENT_AND_CLAMP, MTLStencilOperationDecrementClamp));
  2860. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_INVERT, MTLStencilOperationInvert));
  2861. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_INCREMENT_AND_WRAP, MTLStencilOperationIncrementWrap));
  2862. static_assert(ENUM_MEMBERS_EQUAL(RDD::STENCIL_OP_DECREMENT_AND_WRAP, MTLStencilOperationDecrementWrap));
  2863. // RDD::BlendOperation == MTLBlendOperation.
  2864. static_assert(ENUM_MEMBERS_EQUAL(RDD::BLEND_OP_ADD, MTLBlendOperationAdd));
  2865. static_assert(ENUM_MEMBERS_EQUAL(RDD::BLEND_OP_SUBTRACT, MTLBlendOperationSubtract));
  2866. static_assert(ENUM_MEMBERS_EQUAL(RDD::BLEND_OP_REVERSE_SUBTRACT, MTLBlendOperationReverseSubtract));
  2867. static_assert(ENUM_MEMBERS_EQUAL(RDD::BLEND_OP_MINIMUM, MTLBlendOperationMin));
  2868. static_assert(ENUM_MEMBERS_EQUAL(RDD::BLEND_OP_MAXIMUM, MTLBlendOperationMax));
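// Translates the subpass attachments, primitive topology, rasterization, multisample,
// depth/stencil, and blend state into an MTLRenderPipelineDescriptor, and caches on the
// MDRenderPipeline the dynamic state that Metal applies at encode time.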
  2869. RDD::PipelineID RenderingDeviceDriverMetal::render_pipeline_create(
  2870. ShaderID p_shader,
  2871. VertexFormatID p_vertex_format,
  2872. RenderPrimitive p_render_primitive,
  2873. PipelineRasterizationState p_rasterization_state,
  2874. PipelineMultisampleState p_multisample_state,
  2875. PipelineDepthStencilState p_depth_stencil_state,
  2876. PipelineColorBlendState p_blend_state,
  2877. VectorView<int32_t> p_color_attachments,
  2878. BitField<PipelineDynamicStateFlags> p_dynamic_state,
  2879. RenderPassID p_render_pass,
  2880. uint32_t p_render_subpass,
  2881. VectorView<PipelineSpecializationConstant> p_specialization_constants) {
  2882. MDRenderShader *shader = (MDRenderShader *)(p_shader.id);
  2883. MTLVertexDescriptor *vert_desc = rid::get(p_vertex_format);
  2884. MDRenderPass *pass = (MDRenderPass *)(p_render_pass.id);
  2885. os_signpost_id_t reflect_id = os_signpost_id_make_with_pointer(LOG_INTERVALS, shader);
  2886. os_signpost_interval_begin(LOG_INTERVALS, reflect_id, "render_pipeline_create", "shader_name=%{public}s", shader->name.get_data());
  2887. DEFER([=]() {
  2888. os_signpost_interval_end(LOG_INTERVALS, reflect_id, "render_pipeline_create");
  2889. });
  2890. os_signpost_event_emit(LOG_DRIVER, OS_SIGNPOST_ID_EXCLUSIVE, "create_pipeline");
  2891. MTLRenderPipelineDescriptor *desc = [MTLRenderPipelineDescriptor new];
  2892. {
  2893. MDSubpass const &subpass = pass->subpasses[p_render_subpass];
  2894. for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
  2895. uint32_t attachment = subpass.color_references[i].attachment;
  2896. if (attachment != AttachmentReference::UNUSED) {
  2897. MDAttachment const &a = pass->attachments[attachment];
  2898. desc.colorAttachments[i].pixelFormat = a.format;
  2899. }
  2900. }
  2901. if (subpass.depth_stencil_reference.attachment != AttachmentReference::UNUSED) {
  2902. uint32_t attachment = subpass.depth_stencil_reference.attachment;
  2903. MDAttachment const &a = pass->attachments[attachment];
  2904. if (a.type & MDAttachmentType::Depth) {
  2905. desc.depthAttachmentPixelFormat = a.format;
  2906. }
  2907. if (a.type & MDAttachmentType::Stencil) {
  2908. desc.stencilAttachmentPixelFormat = a.format;
  2909. }
  2910. }
  2911. }
  2912. desc.vertexDescriptor = vert_desc;
  2913. desc.label = [NSString stringWithUTF8String:shader->name.get_data()];
  2914. // Input assembly & tessellation.
  2915. MDRenderPipeline *pipeline = new MDRenderPipeline();
  2916. switch (p_render_primitive) {
  2917. case RENDER_PRIMITIVE_POINTS:
  2918. desc.inputPrimitiveTopology = MTLPrimitiveTopologyClassPoint;
  2919. break;
  2920. case RENDER_PRIMITIVE_LINES:
  2921. case RENDER_PRIMITIVE_LINES_WITH_ADJACENCY:
  2922. case RENDER_PRIMITIVE_LINESTRIPS_WITH_ADJACENCY:
  2923. case RENDER_PRIMITIVE_LINESTRIPS:
  2924. desc.inputPrimitiveTopology = MTLPrimitiveTopologyClassLine;
  2925. break;
  2926. case RENDER_PRIMITIVE_TRIANGLES:
  2927. case RENDER_PRIMITIVE_TRIANGLE_STRIPS:
  2928. case RENDER_PRIMITIVE_TRIANGLES_WITH_ADJACENCY:
  2929. case RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_AJACENCY:
  2930. case RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX:
  2931. desc.inputPrimitiveTopology = MTLPrimitiveTopologyClassTriangle;
  2932. break;
  2933. case RENDER_PRIMITIVE_TESSELATION_PATCH:
  2934. desc.maxTessellationFactor = p_rasterization_state.patch_control_points;
  2935. desc.tessellationPartitionMode = MTLTessellationPartitionModeInteger;
  2936. ERR_FAIL_V_MSG(PipelineID(), "tessellation not implemented");
  2937. break;
  2938. case RENDER_PRIMITIVE_MAX:
  2939. default:
  2940. desc.inputPrimitiveTopology = MTLPrimitiveTopologyClassUnspecified;
  2941. break;
  2942. }
  2943. switch (p_render_primitive) {
  2944. case RENDER_PRIMITIVE_POINTS:
  2945. pipeline->raster_state.render_primitive = MTLPrimitiveTypePoint;
  2946. break;
  2947. case RENDER_PRIMITIVE_LINES:
  2948. case RENDER_PRIMITIVE_LINES_WITH_ADJACENCY:
  2949. pipeline->raster_state.render_primitive = MTLPrimitiveTypeLine;
  2950. break;
  2951. case RENDER_PRIMITIVE_LINESTRIPS:
  2952. case RENDER_PRIMITIVE_LINESTRIPS_WITH_ADJACENCY:
  2953. pipeline->raster_state.render_primitive = MTLPrimitiveTypeLineStrip;
  2954. break;
  2955. case RENDER_PRIMITIVE_TRIANGLES:
  2956. case RENDER_PRIMITIVE_TRIANGLES_WITH_ADJACENCY:
  2957. pipeline->raster_state.render_primitive = MTLPrimitiveTypeTriangle;
  2958. break;
  2959. case RENDER_PRIMITIVE_TRIANGLE_STRIPS:
  2960. case RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_AJACENCY:
  2961. case RENDER_PRIMITIVE_TRIANGLE_STRIPS_WITH_RESTART_INDEX:
  2962. pipeline->raster_state.render_primitive = MTLPrimitiveTypeTriangleStrip;
  2963. break;
  2964. default:
  2965. break;
  2966. }
  2967. // Rasterization.
  2968. desc.rasterizationEnabled = !p_rasterization_state.discard_primitives;
  2969. pipeline->raster_state.clip_mode = p_rasterization_state.enable_depth_clamp ? MTLDepthClipModeClamp : MTLDepthClipModeClip;
  2970. pipeline->raster_state.fill_mode = p_rasterization_state.wireframe ? MTLTriangleFillModeLines : MTLTriangleFillModeFill;
  2971. static const MTLCullMode CULL_MODE[3] = {
  2972. MTLCullModeNone,
  2973. MTLCullModeFront,
  2974. MTLCullModeBack,
  2975. };
  2976. pipeline->raster_state.cull_mode = CULL_MODE[p_rasterization_state.cull_mode];
  2977. pipeline->raster_state.winding = (p_rasterization_state.front_face == POLYGON_FRONT_FACE_CLOCKWISE) ? MTLWindingClockwise : MTLWindingCounterClockwise;
  2978. pipeline->raster_state.depth_bias.enabled = p_rasterization_state.depth_bias_enabled;
  2979. pipeline->raster_state.depth_bias.depth_bias = p_rasterization_state.depth_bias_constant_factor;
  2980. pipeline->raster_state.depth_bias.slope_scale = p_rasterization_state.depth_bias_slope_factor;
  2981. pipeline->raster_state.depth_bias.clamp = p_rasterization_state.depth_bias_clamp;
  2982. // In Metal there is no line width.
  2983. if (!Math::is_equal_approx(p_rasterization_state.line_width, 1.0f)) {
  2984. WARN_PRINT("unsupported: line width");
  2985. }
  2986. // Multisample.
  2987. if (p_multisample_state.enable_sample_shading) {
  2988. WARN_PRINT("unsupported: multi-sample shading");
  2989. }
  2990. if (p_multisample_state.sample_count > TEXTURE_SAMPLES_1) {
  2991. pipeline->sample_count = (*device_properties).find_nearest_supported_sample_count(p_multisample_state.sample_count);
  2992. }
  2993. desc.rasterSampleCount = static_cast<NSUInteger>(pipeline->sample_count);
  2994. desc.alphaToCoverageEnabled = p_multisample_state.enable_alpha_to_coverage;
  2995. desc.alphaToOneEnabled = p_multisample_state.enable_alpha_to_one;
  2996. // Depth stencil.
  2997. if (p_depth_stencil_state.enable_depth_test && desc.depthAttachmentPixelFormat != MTLPixelFormatInvalid) {
  2998. pipeline->raster_state.depth_test.enabled = true;
  2999. MTLDepthStencilDescriptor *ds_desc = [MTLDepthStencilDescriptor new];
  3000. ds_desc.depthWriteEnabled = p_depth_stencil_state.enable_depth_write;
  3001. ds_desc.depthCompareFunction = COMPARE_OPERATORS[p_depth_stencil_state.depth_compare_operator];
  3002. if (p_depth_stencil_state.enable_depth_range) {
  3003. WARN_PRINT("unsupported: depth range");
  3004. }
  3005. if (p_depth_stencil_state.enable_stencil) {
  3006. pipeline->raster_state.stencil.front_reference = p_depth_stencil_state.front_op.reference;
  3007. pipeline->raster_state.stencil.back_reference = p_depth_stencil_state.back_op.reference;
  3008. {
  3009. // Front.
  3010. MTLStencilDescriptor *sd = [MTLStencilDescriptor new];
  3011. sd.stencilFailureOperation = STENCIL_OPERATIONS[p_depth_stencil_state.front_op.fail];
  3012. sd.depthStencilPassOperation = STENCIL_OPERATIONS[p_depth_stencil_state.front_op.pass];
  3013. sd.depthFailureOperation = STENCIL_OPERATIONS[p_depth_stencil_state.front_op.depth_fail];
  3014. sd.stencilCompareFunction = COMPARE_OPERATORS[p_depth_stencil_state.front_op.compare];
  3015. sd.readMask = p_depth_stencil_state.front_op.compare_mask;
  3016. sd.writeMask = p_depth_stencil_state.front_op.write_mask;
  3017. ds_desc.frontFaceStencil = sd;
  3018. }
  3019. {
  3020. // Back.
  3021. MTLStencilDescriptor *sd = [MTLStencilDescriptor new];
  3022. sd.stencilFailureOperation = STENCIL_OPERATIONS[p_depth_stencil_state.back_op.fail];
  3023. sd.depthStencilPassOperation = STENCIL_OPERATIONS[p_depth_stencil_state.back_op.pass];
  3024. sd.depthFailureOperation = STENCIL_OPERATIONS[p_depth_stencil_state.back_op.depth_fail];
  3025. sd.stencilCompareFunction = COMPARE_OPERATORS[p_depth_stencil_state.back_op.compare];
  3026. sd.readMask = p_depth_stencil_state.back_op.compare_mask;
  3027. sd.writeMask = p_depth_stencil_state.back_op.write_mask;
  3028. ds_desc.backFaceStencil = sd;
  3029. }
  3030. }
  3031. pipeline->depth_stencil = [device newDepthStencilStateWithDescriptor:ds_desc];
  3032. ERR_FAIL_NULL_V_MSG(pipeline->depth_stencil, PipelineID(), "Failed to create depth stencil state");
  3033. } else {
3034. // TODO(sgc): FB13671991 was raised because Apple's docs state that calling setDepthStencilState:nil is valid, yet it currently raises an exception.
  3035. pipeline->depth_stencil = get_resource_cache().get_depth_stencil_state(false, false);
  3036. }
  3037. // Blend state.
  3038. {
  3039. for (uint32_t i = 0; i < p_color_attachments.size(); i++) {
  3040. if (p_color_attachments[i] == ATTACHMENT_UNUSED) {
  3041. continue;
  3042. }
  3043. const PipelineColorBlendState::Attachment &bs = p_blend_state.attachments[i];
  3044. MTLRenderPipelineColorAttachmentDescriptor *ca_desc = desc.colorAttachments[p_color_attachments[i]];
  3045. ca_desc.blendingEnabled = bs.enable_blend;
  3046. ca_desc.sourceRGBBlendFactor = BLEND_FACTORS[bs.src_color_blend_factor];
  3047. ca_desc.destinationRGBBlendFactor = BLEND_FACTORS[bs.dst_color_blend_factor];
  3048. ca_desc.rgbBlendOperation = BLEND_OPERATIONS[bs.color_blend_op];
  3049. ca_desc.sourceAlphaBlendFactor = BLEND_FACTORS[bs.src_alpha_blend_factor];
  3050. ca_desc.destinationAlphaBlendFactor = BLEND_FACTORS[bs.dst_alpha_blend_factor];
  3051. ca_desc.alphaBlendOperation = BLEND_OPERATIONS[bs.alpha_blend_op];
  3052. ca_desc.writeMask = MTLColorWriteMaskNone;
  3053. if (bs.write_r) {
  3054. ca_desc.writeMask |= MTLColorWriteMaskRed;
  3055. }
  3056. if (bs.write_g) {
  3057. ca_desc.writeMask |= MTLColorWriteMaskGreen;
  3058. }
  3059. if (bs.write_b) {
  3060. ca_desc.writeMask |= MTLColorWriteMaskBlue;
  3061. }
  3062. if (bs.write_a) {
  3063. ca_desc.writeMask |= MTLColorWriteMaskAlpha;
  3064. }
  3065. }
  3066. pipeline->raster_state.blend.r = p_blend_state.blend_constant.r;
  3067. pipeline->raster_state.blend.g = p_blend_state.blend_constant.g;
  3068. pipeline->raster_state.blend.b = p_blend_state.blend_constant.b;
  3069. pipeline->raster_state.blend.a = p_blend_state.blend_constant.a;
  3070. }
  3071. // Dynamic state.
  3072. if (p_dynamic_state.has_flag(DYNAMIC_STATE_DEPTH_BIAS)) {
  3073. pipeline->raster_state.depth_bias.enabled = true;
  3074. }
  3075. if (p_dynamic_state.has_flag(DYNAMIC_STATE_BLEND_CONSTANTS)) {
  3076. pipeline->raster_state.blend.enabled = true;
  3077. }
  3078. if (p_dynamic_state.has_flag(DYNAMIC_STATE_DEPTH_BOUNDS)) {
  3079. // TODO(sgc): ??
  3080. }
  3081. if (p_dynamic_state.has_flag(DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
  3082. // TODO(sgc): ??
  3083. }
  3084. if (p_dynamic_state.has_flag(DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
  3085. // TODO(sgc): ??
  3086. }
  3087. if (p_dynamic_state.has_flag(DYNAMIC_STATE_STENCIL_REFERENCE)) {
  3088. pipeline->raster_state.stencil.enabled = true;
  3089. }
  3090. if (shader->vert != nil) {
  3091. Result<id<MTLFunction>> function_or_err = _create_function(shader->vert, @"main0", p_specialization_constants);
  3092. ERR_FAIL_COND_V(std::holds_alternative<Error>(function_or_err), PipelineID());
  3093. desc.vertexFunction = std::get<id<MTLFunction>>(function_or_err);
  3094. }
  3095. if (shader->frag != nil) {
  3096. Result<id<MTLFunction>> function_or_err = _create_function(shader->frag, @"main0", p_specialization_constants);
  3097. ERR_FAIL_COND_V(std::holds_alternative<Error>(function_or_err), PipelineID());
  3098. desc.fragmentFunction = std::get<id<MTLFunction>>(function_or_err);
  3099. }
  3100. if (archive) {
  3101. desc.binaryArchives = @[ archive ];
  3102. }
  3103. NSError *error = nil;
  3104. pipeline->state = [device newRenderPipelineStateWithDescriptor:desc
  3105. error:&error];
  3106. pipeline->shader = shader;
  3107. ERR_FAIL_COND_V_MSG(error != nil, PipelineID(), ([NSString stringWithFormat:@"error creating pipeline: %@", error.localizedDescription].UTF8String));
  3108. if (archive) {
  3109. if ([archive addRenderPipelineFunctionsWithDescriptor:desc error:&error]) {
  3110. archive_count += 1;
  3111. } else {
  3112. print_error(error.localizedDescription.UTF8String);
  3113. }
  3114. }
  3115. return PipelineID(pipeline);
  3116. }
  3117. #pragma mark - Compute
  3118. // ----- COMMANDS -----
  3119. void RenderingDeviceDriverMetal::command_bind_compute_pipeline(CommandBufferID p_cmd_buffer, PipelineID p_pipeline) {
  3120. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  3121. cb->bind_pipeline(p_pipeline);
  3122. }
  3123. void RenderingDeviceDriverMetal::command_bind_compute_uniform_set(CommandBufferID p_cmd_buffer, UniformSetID p_uniform_set, ShaderID p_shader, uint32_t p_set_index) {
  3124. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  3125. cb->compute_bind_uniform_set(p_uniform_set, p_shader, p_set_index);
  3126. }
  3127. void RenderingDeviceDriverMetal::command_bind_compute_uniform_sets(CommandBufferID p_cmd_buffer, VectorView<UniformSetID> p_uniform_sets, ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
  3128. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  3129. cb->compute_bind_uniform_sets(p_uniform_sets, p_shader, p_first_set_index, p_set_count);
  3130. }
  3131. void RenderingDeviceDriverMetal::command_compute_dispatch(CommandBufferID p_cmd_buffer, uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
  3132. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  3133. cb->compute_dispatch(p_x_groups, p_y_groups, p_z_groups);
  3134. }
  3135. void RenderingDeviceDriverMetal::command_compute_dispatch_indirect(CommandBufferID p_cmd_buffer, BufferID p_indirect_buffer, uint64_t p_offset) {
  3136. MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
  3137. cb->compute_dispatch_indirect(p_indirect_buffer, p_offset);
  3138. }
  3139. // ----- PIPELINE -----
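// Compute pipelines are built from the shader's kernel function (specialized with the
// given constants) and, when a binary archive is active, recorded into it for reuse.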
RDD::PipelineID RenderingDeviceDriverMetal::compute_pipeline_create(ShaderID p_shader, VectorView<PipelineSpecializationConstant> p_specialization_constants) {
	MDComputeShader *shader = (MDComputeShader *)(p_shader.id);

	os_signpost_id_t reflect_id = os_signpost_id_make_with_pointer(LOG_INTERVALS, shader);
	os_signpost_interval_begin(LOG_INTERVALS, reflect_id, "compute_pipeline_create", "shader_name=%{public}s", shader->name.get_data());
	DEFER([=]() {
		os_signpost_interval_end(LOG_INTERVALS, reflect_id, "compute_pipeline_create");
	});

	os_signpost_event_emit(LOG_DRIVER, OS_SIGNPOST_ID_EXCLUSIVE, "create_pipeline");

	Result<id<MTLFunction>> function_or_err = _create_function(shader->kernel, @"main0", p_specialization_constants);
	ERR_FAIL_COND_V(std::holds_alternative<Error>(function_or_err), PipelineID());
	id<MTLFunction> function = std::get<id<MTLFunction>>(function_or_err);

	MTLComputePipelineDescriptor *desc = [MTLComputePipelineDescriptor new];
	desc.computeFunction = function;
	if (archive) {
		desc.binaryArchives = @[ archive ];
	}

	NSError *error;
	id<MTLComputePipelineState> state = [device newComputePipelineStateWithDescriptor:desc
																			   options:MTLPipelineOptionNone
																			reflection:nil
																				 error:&error];
	ERR_FAIL_COND_V_MSG(error != nil, PipelineID(), ([NSString stringWithFormat:@"error creating pipeline: %@", error.localizedDescription].UTF8String));

	MDComputePipeline *pipeline = new MDComputePipeline(state);
	pipeline->compute_state.local = shader->local;
	pipeline->shader = shader;

	if (archive) {
		if ([archive addComputePipelineFunctionsWithDescriptor:desc error:&error]) {
			archive_count += 1;
		} else {
			print_error(error.localizedDescription.UTF8String);
		}
	}

	return PipelineID(pipeline);
}

#pragma mark - Queries

// ----- TIMESTAMP -----
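// Timestamp queries are not implemented by this driver: pool creation hands
// back a dummy QueryPoolID, the reset/write commands are no-ops, and reading
// results simply zero-fills the output buffer.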
RDD::QueryPoolID RenderingDeviceDriverMetal::timestamp_query_pool_create(uint32_t p_query_count) {
	return QueryPoolID(1);
}

void RenderingDeviceDriverMetal::timestamp_query_pool_free(QueryPoolID p_pool_id) {
}

void RenderingDeviceDriverMetal::timestamp_query_pool_get_results(QueryPoolID p_pool_id, uint32_t p_query_count, uint64_t *r_results) {
	// Metal doesn't support timestamp queries, so we just clear the buffer.
	bzero(r_results, p_query_count * sizeof(uint64_t));
}

uint64_t RenderingDeviceDriverMetal::timestamp_query_result_to_time(uint64_t p_result) {
	return p_result;
}

void RenderingDeviceDriverMetal::command_timestamp_query_pool_reset(CommandBufferID p_cmd_buffer, QueryPoolID p_pool_id, uint32_t p_query_count) {
}

void RenderingDeviceDriverMetal::command_timestamp_write(CommandBufferID p_cmd_buffer, QueryPoolID p_pool_id, uint32_t p_index) {
}

#pragma mark - Labels
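// Command buffer labels map directly onto Metal debug groups
// (pushDebugGroup/popDebugGroup) on the underlying MTLCommandBuffer; the
// color argument has no Metal counterpart and is ignored.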
void RenderingDeviceDriverMetal::command_begin_label(CommandBufferID p_cmd_buffer, const char *p_label_name, const Color &p_color) {
	MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
	NSString *s = [[NSString alloc] initWithBytesNoCopy:(void *)p_label_name length:strlen(p_label_name) encoding:NSUTF8StringEncoding freeWhenDone:NO];
	[cb->get_command_buffer() pushDebugGroup:s];
}

void RenderingDeviceDriverMetal::command_end_label(CommandBufferID p_cmd_buffer) {
	MDCommandBuffer *cb = (MDCommandBuffer *)(p_cmd_buffer.id);
	[cb->get_command_buffer() popDebugGroup];
}

#pragma mark - Debug

void RenderingDeviceDriverMetal::command_insert_breadcrumb(CommandBufferID p_cmd_buffer, uint32_t p_data) {
	// TODO: Implement.
}

#pragma mark - Submission

void RenderingDeviceDriverMetal::begin_segment(uint32_t p_frame_index, uint32_t p_frames_drawn) {
}

void RenderingDeviceDriverMetal::end_segment() {
}

#pragma mark - Misc
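// set_object_name assigns a debug label to the underlying Metal object where
// the API allows it: textures, buffers, shader functions, and the argument
// buffers backing uniform sets. Samplers and pipeline states can't be
// relabeled after creation, so those cases are intentionally empty.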
void RenderingDeviceDriverMetal::set_object_name(ObjectType p_type, ID p_driver_id, const String &p_name) {
	switch (p_type) {
		case OBJECT_TYPE_TEXTURE: {
			id<MTLTexture> tex = rid::get(p_driver_id);
			tex.label = [NSString stringWithUTF8String:p_name.utf8().get_data()];
		} break;
		case OBJECT_TYPE_SAMPLER: {
			// Can't set label after creation.
		} break;
		case OBJECT_TYPE_BUFFER: {
			id<MTLBuffer> buffer = rid::get(p_driver_id);
			buffer.label = [NSString stringWithUTF8String:p_name.utf8().get_data()];
		} break;
		case OBJECT_TYPE_SHADER: {
			NSString *label = [NSString stringWithUTF8String:p_name.utf8().get_data()];
			MDShader *shader = (MDShader *)(p_driver_id.id);
			if (MDRenderShader *rs = dynamic_cast<MDRenderShader *>(shader); rs != nullptr) {
				[rs->vert setLabel:label];
				[rs->frag setLabel:label];
			} else if (MDComputeShader *cs = dynamic_cast<MDComputeShader *>(shader); cs != nullptr) {
				[cs->kernel setLabel:label];
			} else {
				DEV_ASSERT(false);
			}
		} break;
		case OBJECT_TYPE_UNIFORM_SET: {
			MDUniformSet *set = (MDUniformSet *)(p_driver_id.id);
			for (KeyValue<MDShader *, BoundUniformSet> &keyval : set->bound_uniforms) {
				keyval.value.buffer.label = [NSString stringWithUTF8String:p_name.utf8().get_data()];
			}
		} break;
		case OBJECT_TYPE_PIPELINE: {
			// Can't set label after creation.
		} break;
		default: {
			DEV_ASSERT(false);
		}
	}
}
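// get_resource_native_handle exposes the raw Metal objects (device, queue,
// textures, buffers, pipeline states) as opaque 64-bit handles for interop
// and inspection; resources with no direct Metal counterpart return 0.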
uint64_t RenderingDeviceDriverMetal::get_resource_native_handle(DriverResource p_type, ID p_driver_id) {
	switch (p_type) {
		case DRIVER_RESOURCE_LOGICAL_DEVICE: {
			return (uint64_t)(uintptr_t)(__bridge void *)device;
		}
		case DRIVER_RESOURCE_PHYSICAL_DEVICE: {
			return 0;
		}
		case DRIVER_RESOURCE_TOPMOST_OBJECT: {
			return 0;
		}
		case DRIVER_RESOURCE_COMMAND_QUEUE: {
			return (uint64_t)(uintptr_t)(__bridge void *)device_queue;
		}
		case DRIVER_RESOURCE_QUEUE_FAMILY: {
			return 0;
		}
		case DRIVER_RESOURCE_TEXTURE: {
			return p_driver_id.id;
		}
		case DRIVER_RESOURCE_TEXTURE_VIEW: {
			return p_driver_id.id;
		}
		case DRIVER_RESOURCE_TEXTURE_DATA_FORMAT: {
			return 0;
		}
		case DRIVER_RESOURCE_SAMPLER: {
			return p_driver_id.id;
		}
		case DRIVER_RESOURCE_UNIFORM_SET: {
			return 0;
		}
		case DRIVER_RESOURCE_BUFFER: {
			return p_driver_id.id;
		}
		case DRIVER_RESOURCE_COMPUTE_PIPELINE: {
			MDComputePipeline *pipeline = (MDComputePipeline *)(p_driver_id.id);
			return (uint64_t)(uintptr_t)(__bridge void *)pipeline->state;
		}
		case DRIVER_RESOURCE_RENDER_PIPELINE: {
			MDRenderPipeline *pipeline = (MDRenderPipeline *)(p_driver_id.id);
			return (uint64_t)(uintptr_t)(__bridge void *)pipeline->state;
		}
		default: {
			return 0;
		}
	}
}

uint64_t RenderingDeviceDriverMetal::get_total_memory_used() {
	return device.currentAllocatedSize;
}

uint64_t RenderingDeviceDriverMetal::get_lazily_memory_used() {
	return 0; // TODO: Track this (grep for memoryless in Godot's Metal backend).
}
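// limit_get maps RenderingDevice limits onto the values gathered in
// MetalDeviceProperties. Limits Metal has no direct equivalent for go through
// the UNKNOWN macro, which returns a large sentinel (1 << 30) and, in
// DEV_ENABLED builds, warns once per limit.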
uint64_t RenderingDeviceDriverMetal::limit_get(Limit p_limit) {
	MetalDeviceProperties const &props = (*device_properties);
	MetalLimits const &limits = props.limits;

#if defined(DEV_ENABLED)
#define UNKNOWN(NAME)                                                            \
	case NAME:                                                                   \
		WARN_PRINT_ONCE("Returning maximum value for unknown limit " #NAME "."); \
		return (uint64_t)1 << 30;
#else
#define UNKNOWN(NAME) \
	case NAME:        \
		return (uint64_t)1 << 30
#endif

	// clang-format off
	switch (p_limit) {
		case LIMIT_MAX_BOUND_UNIFORM_SETS:
			return limits.maxBoundDescriptorSets;
		case LIMIT_MAX_FRAMEBUFFER_COLOR_ATTACHMENTS:
			return limits.maxColorAttachments;
		case LIMIT_MAX_TEXTURES_PER_UNIFORM_SET:
			return limits.maxTexturesPerArgumentBuffer;
		case LIMIT_MAX_SAMPLERS_PER_UNIFORM_SET:
			return limits.maxSamplersPerArgumentBuffer;
		case LIMIT_MAX_STORAGE_BUFFERS_PER_UNIFORM_SET:
			return limits.maxBuffersPerArgumentBuffer;
		case LIMIT_MAX_STORAGE_IMAGES_PER_UNIFORM_SET:
			return limits.maxTexturesPerArgumentBuffer;
		case LIMIT_MAX_UNIFORM_BUFFERS_PER_UNIFORM_SET:
			return limits.maxBuffersPerArgumentBuffer;
		case LIMIT_MAX_DRAW_INDEXED_INDEX:
			return limits.maxDrawIndexedIndexValue;
		case LIMIT_MAX_FRAMEBUFFER_HEIGHT:
			return limits.maxFramebufferHeight;
		case LIMIT_MAX_FRAMEBUFFER_WIDTH:
			return limits.maxFramebufferWidth;
		case LIMIT_MAX_TEXTURE_ARRAY_LAYERS:
			return limits.maxImageArrayLayers;
		case LIMIT_MAX_TEXTURE_SIZE_1D:
			return limits.maxImageDimension1D;
		case LIMIT_MAX_TEXTURE_SIZE_2D:
			return limits.maxImageDimension2D;
		case LIMIT_MAX_TEXTURE_SIZE_3D:
			return limits.maxImageDimension3D;
		case LIMIT_MAX_TEXTURE_SIZE_CUBE:
			return limits.maxImageDimensionCube;
		case LIMIT_MAX_TEXTURES_PER_SHADER_STAGE:
			return limits.maxTexturesPerArgumentBuffer;
		case LIMIT_MAX_SAMPLERS_PER_SHADER_STAGE:
			return limits.maxSamplersPerArgumentBuffer;
		case LIMIT_MAX_STORAGE_BUFFERS_PER_SHADER_STAGE:
			return limits.maxBuffersPerArgumentBuffer;
		case LIMIT_MAX_STORAGE_IMAGES_PER_SHADER_STAGE:
			return limits.maxTexturesPerArgumentBuffer;
		case LIMIT_MAX_UNIFORM_BUFFERS_PER_SHADER_STAGE:
			return limits.maxBuffersPerArgumentBuffer;
		case LIMIT_MAX_PUSH_CONSTANT_SIZE:
			return limits.maxBufferLength;
		case LIMIT_MAX_UNIFORM_BUFFER_SIZE:
			return limits.maxBufferLength;
		case LIMIT_MAX_VERTEX_INPUT_ATTRIBUTE_OFFSET:
			return limits.maxVertexDescriptorLayoutStride;
		case LIMIT_MAX_VERTEX_INPUT_ATTRIBUTES:
			return limits.maxVertexInputAttributes;
		case LIMIT_MAX_VERTEX_INPUT_BINDINGS:
			return limits.maxVertexInputBindings;
		case LIMIT_MAX_VERTEX_INPUT_BINDING_STRIDE:
			return limits.maxVertexInputBindingStride;
		case LIMIT_MIN_UNIFORM_BUFFER_OFFSET_ALIGNMENT:
			return limits.minUniformBufferOffsetAlignment;
		case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_X:
			return limits.maxComputeWorkGroupCount.width;
		case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Y:
			return limits.maxComputeWorkGroupCount.height;
		case LIMIT_MAX_COMPUTE_WORKGROUP_COUNT_Z:
			return limits.maxComputeWorkGroupCount.depth;
		case LIMIT_MAX_COMPUTE_WORKGROUP_INVOCATIONS:
			return std::max({ limits.maxThreadsPerThreadGroup.width, limits.maxThreadsPerThreadGroup.height, limits.maxThreadsPerThreadGroup.depth });
		case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_X:
			return limits.maxThreadsPerThreadGroup.width;
		case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Y:
			return limits.maxThreadsPerThreadGroup.height;
		case LIMIT_MAX_COMPUTE_WORKGROUP_SIZE_Z:
			return limits.maxThreadsPerThreadGroup.depth;
		case LIMIT_MAX_VIEWPORT_DIMENSIONS_X:
			return limits.maxViewportDimensionX;
		case LIMIT_MAX_VIEWPORT_DIMENSIONS_Y:
			return limits.maxViewportDimensionY;
		case LIMIT_SUBGROUP_SIZE:
			// MoltenVK sets the subgroupSize to the same as the maxSubgroupSize.
			return limits.maxSubgroupSize;
		case LIMIT_SUBGROUP_MIN_SIZE:
			return limits.minSubgroupSize;
		case LIMIT_SUBGROUP_MAX_SIZE:
			return limits.maxSubgroupSize;
		case LIMIT_SUBGROUP_IN_SHADERS:
			return (uint64_t)limits.subgroupSupportedShaderStages;
		case LIMIT_SUBGROUP_OPERATIONS:
			return (uint64_t)limits.subgroupSupportedOperations;
		case LIMIT_METALFX_TEMPORAL_SCALER_MIN_SCALE:
			return (uint64_t)((1.0 / limits.temporalScalerInputContentMaxScale) * 1000'000);
		case LIMIT_METALFX_TEMPORAL_SCALER_MAX_SCALE:
			return (uint64_t)((1.0 / limits.temporalScalerInputContentMinScale) * 1000'000);
		case LIMIT_MAX_SHADER_VARYINGS:
			return limits.maxShaderVaryings;
		UNKNOWN(LIMIT_VRS_TEXEL_WIDTH);
		UNKNOWN(LIMIT_VRS_TEXEL_HEIGHT);
		UNKNOWN(LIMIT_VRS_MAX_FRAGMENT_WIDTH);
		UNKNOWN(LIMIT_VRS_MAX_FRAGMENT_HEIGHT);
		default:
			ERR_FAIL_V(0);
	}
	// clang-format on
	return 0;
}
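// Pipeline barriers are reported as not honored by this driver; every other
// trait falls back to the base RenderingDeviceDriver implementation.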
uint64_t RenderingDeviceDriverMetal::api_trait_get(ApiTrait p_trait) {
	switch (p_trait) {
		case API_TRAIT_HONORS_PIPELINE_BARRIERS:
			return 0;
		default:
			return RenderingDeviceDriver::api_trait_get(p_trait);
	}
}

bool RenderingDeviceDriverMetal::has_feature(Features p_feature) {
	switch (p_feature) {
		case SUPPORTS_MULTIVIEW:
			return multiview_capabilities.is_supported;
		case SUPPORTS_FSR_HALF_FLOAT:
			return true;
		case SUPPORTS_ATTACHMENT_VRS:
			// TODO(sgc): Maybe supported via https://developer.apple.com/documentation/metal/render_passes/rendering_at_different_rasterization_rates?language=objc
			// See also:
			//
			// * https://forum.beyond3d.com/threads/variable-rate-shading-vs-variable-rate-rasterization.62243/post-2191363
			//
			return false;
		case SUPPORTS_FRAGMENT_SHADER_WITH_ONLY_SIDE_EFFECTS:
			return true;
		case SUPPORTS_BUFFER_DEVICE_ADDRESS:
			return device_properties->features.supports_gpu_address;
		case SUPPORTS_METALFX_SPATIAL:
			return device_properties->features.metal_fx_spatial;
		case SUPPORTS_METALFX_TEMPORAL:
			return device_properties->features.metal_fx_temporal;
		default:
			return false;
	}
}

const RDD::MultiviewCapabilities &RenderingDeviceDriverMetal::get_multiview_capabilities() {
	return multiview_capabilities;
}

String RenderingDeviceDriverMetal::get_api_version() const {
	return vformat("%d.%d", version_major, version_minor);
}

String RenderingDeviceDriverMetal::get_pipeline_cache_uuid() const {
	return pipeline_cache_id;
}

const RDD::Capabilities &RenderingDeviceDriverMetal::get_capabilities() const {
	return capabilities;
}

bool RenderingDeviceDriverMetal::is_composite_alpha_supported(CommandQueueID p_queue) const {
	// The CAMetalLayer.opaque property is configured according to this global setting.
	return OS::get_singleton()->is_layered_allowed();
}

size_t RenderingDeviceDriverMetal::get_texel_buffer_alignment_for_format(RDD::DataFormat p_format) const {
	return [device minimumLinearTextureAlignmentForPixelFormat:pixel_formats->getMTLPixelFormat(p_format)];
}

size_t RenderingDeviceDriverMetal::get_texel_buffer_alignment_for_format(MTLPixelFormat p_format) const {
	return [device minimumLinearTextureAlignmentForPixelFormat:p_format];
}

/******************/

RenderingDeviceDriverMetal::RenderingDeviceDriverMetal(RenderingContextDriverMetal *p_context_driver) :
		context_driver(p_context_driver) {
	DEV_ASSERT(p_context_driver != nullptr);

#if TARGET_OS_OSX
	if (String res = OS::get_singleton()->get_environment("GODOT_MTL_SHADER_LOAD_STRATEGY"); res == U"lazy") {
		_shader_load_strategy = ShaderLoadStrategy::LAZY;
	}
#else
	// Always use the lazy strategy on other OSs like iOS, tvOS, or visionOS.
	_shader_load_strategy = ShaderLoadStrategy::LAZY;
#endif
}

RenderingDeviceDriverMetal::~RenderingDeviceDriverMetal() {
	for (MDCommandBuffer *cb : command_buffers) {
		delete cb;
	}

	for (KeyValue<SHA256Digest, ShaderCacheEntry *> &kv : _shader_cache) {
		memdelete(kv.value);
	}
}

#pragma mark - Initialization
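// _create_device acquires the MTLDevice chosen by the context driver, creates
// the command queue, registers a named capture scope so Xcode can capture
// Godot frames, and sets up the resource cache.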
Error RenderingDeviceDriverMetal::_create_device() {
	device = context_driver->get_metal_device();

	device_queue = [device newCommandQueue];
	ERR_FAIL_NULL_V(device_queue, ERR_CANT_CREATE);

	device_scope = [MTLCaptureManager.sharedCaptureManager newCaptureScopeWithCommandQueue:device_queue];
	device_scope.label = @"Godot Frame";
	[device_scope beginScope]; // Allow Xcode to capture the first frame, if desired.

	resource_cache = std::make_unique<MDResourceCache>(this);

	return OK;
}
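// _check_capabilities derives the reported driver version from the default
// Metal Shading Language version: the MTLLanguageVersion value packs the
// major number in the upper 16 bits and the minor number in the low bits,
// which is why the shifts below use 0x10.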
Error RenderingDeviceDriverMetal::_check_capabilities() {
	MTLCompileOptions *options = [MTLCompileOptions new];
	version_major = (options.languageVersion >> 0x10) & 0xff;
	version_minor = (options.languageVersion >> 0x00) & 0xff;

	capabilities.device_family = DEVICE_METAL;
	capabilities.version_major = version_major;
	capabilities.version_minor = version_minor;
	return OK;
}
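// initialize wires everything together: it creates the device and queue,
// records the capabilities and pipeline cache ID, queries device properties
// and pixel format support, reports multiview support when layered rendering
// is available, and rejects GPUs below the Apple4 family (A11, 2017) that the
// Metal renderer requires.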
Error RenderingDeviceDriverMetal::initialize(uint32_t p_device_index, uint32_t p_frame_count) {
	context_device = context_driver->device_get(p_device_index);

	Error err = _create_device();
	ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

	err = _check_capabilities();
	ERR_FAIL_COND_V(err, ERR_CANT_CREATE);

	// Set the pipeline cache ID based on the Metal version.
	pipeline_cache_id = "metal-driver-" + get_api_version();

	device_properties = memnew(MetalDeviceProperties(device));
	pixel_formats = memnew(PixelFormats(device, device_properties->features));
	if (device_properties->features.layeredRendering) {
		multiview_capabilities.is_supported = true;
		multiview_capabilities.max_view_count = device_properties->limits.maxViewports;
		// NOTE: I'm not sure what the limit is as I don't see it referenced anywhere.
		multiview_capabilities.max_instance_count = UINT32_MAX;

		print_verbose("- Metal multiview supported:");
		print_verbose("  max view count: " + itos(multiview_capabilities.max_view_count));
		print_verbose("  max instances: " + itos(multiview_capabilities.max_instance_count));
	} else {
		print_verbose("- Metal multiview not supported");
	}

	// The Metal renderer requires Apple4 family. This is 2017 era A11 chips and newer.
	if (device_properties->features.highestFamily < MTLGPUFamilyApple4) {
		String error_string = vformat("Your Apple GPU does not support the following features, which are required to use Metal-based renderers in Godot:\n\n");
		if (!device_properties->features.imageCubeArray) {
			error_string += "- No support for image cube arrays.\n";
		}

#if defined(IOS_ENABLED)
		// iOS platform ports currently don't exit themselves when this method returns `ERR_CANT_CREATE`.
		OS::get_singleton()->alert(error_string + "\nClick OK to exit (black screen will be visible).");
#else
		OS::get_singleton()->alert(error_string + "\nClick OK to exit.");
#endif

		return ERR_CANT_CREATE;
	}

	return OK;
}