/**************************************************************************/
/* metal_objects.mm */
/**************************************************************************/
/* This file is part of: */
/* GODOT ENGINE */
/* https://godotengine.org */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur. */
/* */
/* Permission is hereby granted, free of charge, to any person obtaining */
/* a copy of this software and associated documentation files (the */
/* "Software"), to deal in the Software without restriction, including */
/* without limitation the rights to use, copy, modify, merge, publish, */
/* distribute, sublicense, and/or sell copies of the Software, and to */
/* permit persons to whom the Software is furnished to do so, subject to */
/* the following conditions: */
/* */
/* The above copyright notice and this permission notice shall be */
/* included in all copies or substantial portions of the Software. */
/* */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
/**************************************************************************/
/**************************************************************************/
/* */
/* Portions of this code were derived from MoltenVK. */
/* */
/* Copyright (c) 2015-2023 The Brenwill Workshop Ltd. */
/* (http://www.brenwill.com) */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); */
/* you may not use this file except in compliance with the License. */
/* You may obtain a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or */
/* implied. See the License for the specific language governing */
/* permissions and limitations under the License. */
/**************************************************************************/

#import "metal_objects.h"
#import "metal_utils.h"
#import "pixel_formats.h"
#import "rendering_device_driver_metal.h"

#import <os/signpost.h>

void MDCommandBuffer::begin() {
	DEV_ASSERT(commandBuffer == nil);
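	// NOTE: With unretained references, Metal does not retain the objects used
	// by this command buffer, so the driver is responsible for keeping every
	// referenced resource alive until the GPU has finished executing it.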
	commandBuffer = queue.commandBufferWithUnretainedReferences;
}

void MDCommandBuffer::end() {
	switch (type) {
		case MDCommandBufferStateType::None:
			return;
		case MDCommandBufferStateType::Render:
			return render_end_pass();
		case MDCommandBufferStateType::Compute:
			return _end_compute_dispatch();
		case MDCommandBufferStateType::Blit:
			return _end_blit();
	}
}

void MDCommandBuffer::commit() {
	end();
	[commandBuffer commit];
	commandBuffer = nil;
}

void MDCommandBuffer::bind_pipeline(RDD::PipelineID p_pipeline) {
	MDPipeline *p = (MDPipeline *)(p_pipeline.id);

	// End the current encoder if it is a compute or blit encoder, as neither
	// has a defined end boundary in the RDD, unlike render passes.
	if (type == MDCommandBufferStateType::Compute) {
		_end_compute_dispatch();
	} else if (type == MDCommandBufferStateType::Blit) {
		_end_blit();
	}

	if (p->type == MDPipelineType::Render) {
		DEV_ASSERT(type == MDCommandBufferStateType::Render);
		MDRenderPipeline *rp = (MDRenderPipeline *)p;

		if (render.encoder == nil) {
			// This error would happen if the render pass failed.
			ERR_FAIL_NULL_MSG(render.desc, "Render pass descriptor is null.");

			// This condition occurs when there were no attachments when render_next_subpass()
			// was called, and is due to the SUPPORTS_FRAGMENT_SHADER_WITH_ONLY_SIDE_EFFECTS flag.
			render.desc.defaultRasterSampleCount = static_cast<NSUInteger>(rp->sample_count);

			// NOTE(sgc): This is to test rdar://FB13605547 and will be deleted once the fix is confirmed.
#if 0
			if (render.pipeline->sample_count == 4) {
				static id<MTLTexture> tex = nil;
				static id<MTLTexture> res_tex = nil;
				static dispatch_once_t onceToken;
				dispatch_once(&onceToken, ^{
					Size2i sz = render.frameBuffer->size;
					MTLTextureDescriptor *td = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatRGBA8Unorm width:sz.width height:sz.height mipmapped:NO];
					td.textureType = MTLTextureType2DMultisample;
					td.storageMode = MTLStorageModeMemoryless;
					td.usage = MTLTextureUsageRenderTarget;
					td.sampleCount = render.pipeline->sample_count;
					tex = [device_driver->get_device() newTextureWithDescriptor:td];

					td.textureType = MTLTextureType2D;
					td.storageMode = MTLStorageModePrivate;
					td.usage = MTLTextureUsageShaderWrite;
					td.sampleCount = 1;
					res_tex = [device_driver->get_device() newTextureWithDescriptor:td];
				});

				render.desc.colorAttachments[0].texture = tex;
				render.desc.colorAttachments[0].loadAction = MTLLoadActionClear;
				render.desc.colorAttachments[0].storeAction = MTLStoreActionMultisampleResolve;
				render.desc.colorAttachments[0].resolveTexture = res_tex;
			}
#endif

			render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:render.desc];
		}

		if (render.pipeline != rp) {
			render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_RASTER));
			// Mark all uniforms as dirty, as variants of a shader pipeline may have a different entry point ABI,
			// due to setting force_active_argument_buffer_resources = true for spirv_cross::CompilerMSL::Options.
			// As a result, uniform sets with the same layout will generate redundant binding warnings when
			// capturing a Metal frame in Xcode.
			//
			// If we don't mark them dirty, some bindings will generate a validation error.
			render.mark_uniforms_dirty();
			if (render.pipeline != nullptr && render.pipeline->depth_stencil != rp->depth_stencil) {
				render.dirty.set_flag(RenderState::DIRTY_DEPTH);
			}
			if (rp->raster_state.blend.enabled) {
				render.dirty.set_flag(RenderState::DIRTY_BLEND);
			}
			render.pipeline = rp;
		}
	} else if (p->type == MDPipelineType::Compute) {
		DEV_ASSERT(type == MDCommandBufferStateType::None);
		type = MDCommandBufferStateType::Compute;

		compute.pipeline = (MDComputePipeline *)p;
		compute.encoder = commandBuffer.computeCommandEncoder;
		[compute.encoder setComputePipelineState:compute.pipeline->state];
	}
}

id<MTLBlitCommandEncoder> MDCommandBuffer::blit_command_encoder() {
	switch (type) {
		case MDCommandBufferStateType::None:
			break;
		case MDCommandBufferStateType::Render:
			render_end_pass();
			break;
		case MDCommandBufferStateType::Compute:
			_end_compute_dispatch();
			break;
		case MDCommandBufferStateType::Blit:
			return blit.encoder;
	}

	type = MDCommandBufferStateType::Blit;
	blit.encoder = commandBuffer.blitCommandEncoder;
	return blit.encoder;
}
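
// Encodes an empty render pass: creating the encoder and immediately ending it
// is sufficient to execute the descriptor's load and store actions (e.g. a
// clear), and the push/pop of the debug group merely labels the pass for
// capture tools.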
void MDCommandBuffer::encodeRenderCommandEncoderWithDescriptor(MTLRenderPassDescriptor *p_desc, NSString *p_label) {
	switch (type) {
		case MDCommandBufferStateType::None:
			break;
		case MDCommandBufferStateType::Render:
			render_end_pass();
			break;
		case MDCommandBufferStateType::Compute:
			_end_compute_dispatch();
			break;
		case MDCommandBufferStateType::Blit:
			_end_blit();
			break;
	}

	id<MTLRenderCommandEncoder> enc = [commandBuffer renderCommandEncoderWithDescriptor:p_desc];
	if (p_label != nil) {
		[enc pushDebugGroup:p_label];
		[enc popDebugGroup];
	}
	[enc endEncoding];
}

#pragma mark - Render Commands

void MDCommandBuffer::render_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
	if (render.uniform_sets.size() <= set->index) {
		uint32_t s = render.uniform_sets.size();
		render.uniform_sets.resize(set->index + 1);
		// Set intermediate values to null.
		std::fill(&render.uniform_sets[s], &render.uniform_sets[set->index] + 1, nullptr);
	}

	if (render.uniform_sets[set->index] != set) {
		render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
		render.uniform_set_mask |= 1ULL << set->index;
		render.uniform_sets[set->index] = set;
	}
}

void MDCommandBuffer::render_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	for (size_t i = 0u; i < p_set_count; ++i) {
		MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
		if (render.uniform_sets.size() <= set->index) {
			uint32_t s = render.uniform_sets.size();
			render.uniform_sets.resize(set->index + 1);
			// Set intermediate values to null.
			std::fill(&render.uniform_sets[s], &render.uniform_sets[set->index] + 1, nullptr);
		}

		if (render.uniform_sets[set->index] != set) {
			render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
			render.uniform_set_mask |= 1ULL << set->index;
			render.uniform_sets[set->index] = set;
		}
	}
}
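
// Metal has no direct equivalent of Vulkan's vkCmdClearAttachments, so
// attachments are cleared mid-pass by drawing the clear rects with a cached
// clear pipeline: two triangles (6 vertices) per rect, per view when layered
// rendering is enabled.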
void MDCommandBuffer::render_clear_attachments(VectorView<RDD::AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	const MDSubpass &subpass = render.get_subpass();

	uint32_t vertex_count = p_rects.size() * 6 * subpass.view_count;
	simd::float4 *vertices = ALLOCA_ARRAY(simd::float4, vertex_count);
	simd::float4 clear_colors[ClearAttKey::ATTACHMENT_COUNT];

	Size2i size = render.frameBuffer->size;
	Rect2i render_area = render.clip_to_render_area({ { 0, 0 }, size });
	size = Size2i(render_area.position.x + render_area.size.width, render_area.position.y + render_area.size.height);
	_populate_vertices(vertices, size, p_rects);

	ClearAttKey key;
	key.sample_count = render.pass->get_sample_count();
	if (subpass.view_count > 1) {
		key.enable_layered_rendering();
	}

	float depth_value = 0;
	uint32_t stencil_value = 0;

	for (uint32_t i = 0; i < p_attachment_clears.size(); i++) {
		RDD::AttachmentClear const &attClear = p_attachment_clears[i];
		uint32_t attachment_index;
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			attachment_index = attClear.color_attachment;
		} else {
			attachment_index = subpass.depth_stencil_reference.attachment;
		}

		MDAttachment const &mda = render.pass->attachments[attachment_index];
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			key.set_color_format(attachment_index, mda.format);
			clear_colors[attachment_index] = {
				attClear.value.color.r,
				attClear.value.color.g,
				attClear.value.color.b,
				attClear.value.color.a
			};
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT)) {
			key.set_depth_format(mda.format);
			depth_value = attClear.value.depth;
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT)) {
			key.set_stencil_format(mda.format);
			stencil_value = attClear.value.stencil;
		}
	}

	clear_colors[ClearAttKey::DEPTH_INDEX] = {
		depth_value,
		depth_value,
		depth_value,
		depth_value
	};

	id<MTLRenderCommandEncoder> enc = render.encoder;

	MDResourceCache &cache = device_driver->get_resource_cache();

	[enc pushDebugGroup:@"ClearAttachments"];
	[enc setRenderPipelineState:cache.get_clear_render_pipeline_state(key, nil)];
	[enc setDepthStencilState:cache.get_depth_stencil_state(
								  key.is_depth_enabled(),
								  key.is_stencil_enabled())];
	[enc setStencilReferenceValue:stencil_value];
	[enc setCullMode:MTLCullModeNone];
	[enc setTriangleFillMode:MTLTriangleFillModeFill];
	[enc setDepthBias:0 slopeScale:0 clamp:0];
	[enc setViewport:{ 0, 0, (double)size.width, (double)size.height, 0.0, 1.0 }];
	[enc setScissorRect:{ 0, 0, (NSUInteger)size.width, (NSUInteger)size.height }];

	[enc setVertexBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setFragmentBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setVertexBytes:vertices length:vertex_count * sizeof(vertices[0]) atIndex:device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX)];

	[enc drawPrimitives:MTLPrimitiveTypeTriangle vertexStart:0 vertexCount:vertex_count];
	[enc popDebugGroup];

	render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_DEPTH | RenderState::DIRTY_RASTER));
	render.mark_uniforms_dirty({ 0 }); // Mark index 0 dirty, if there is already a binding for index 0.
	render.mark_viewport_dirty();
	render.mark_scissors_dirty();
	render.mark_vertex_dirty();
	render.mark_blend_dirty();
}

void MDCommandBuffer::_render_set_dirty_state() {
	_render_bind_uniform_sets();

	MDSubpass const &subpass = render.get_subpass();
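	// For multiview, supply the view range { first view, view count } to both
	// stages; shaders compiled with a view-mask buffer read it at
	// VIEW_MASK_BUFFER_INDEX.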
	if (subpass.view_count > 1) {
		uint32_t view_range[2] = { 0, subpass.view_count };
		[render.encoder setVertexBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
		[render.encoder setFragmentBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_PIPELINE)) {
		[render.encoder setRenderPipelineState:render.pipeline->state];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VIEWPORT)) {
		[render.encoder setViewports:render.viewports.ptr() count:render.viewports.size()];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_DEPTH)) {
		[render.encoder setDepthStencilState:render.pipeline->depth_stencil];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_RASTER)) {
		render.pipeline->raster_state.apply(render.encoder);
	}

	if (render.dirty.has_flag(RenderState::DIRTY_SCISSOR) && !render.scissors.is_empty()) {
		size_t len = render.scissors.size();
		MTLScissorRect *rects = ALLOCA_ARRAY(MTLScissorRect, len);
		for (size_t i = 0; i < len; i++) {
			rects[i] = render.clip_to_render_area(render.scissors[i]);
		}
		[render.encoder setScissorRects:rects count:len];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_BLEND) && render.blend_constants.has_value()) {
		[render.encoder setBlendColorRed:render.blend_constants->r green:render.blend_constants->g blue:render.blend_constants->b alpha:render.blend_constants->a];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VERTEX)) {
		uint32_t p_binding_count = render.vertex_buffers.size();
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
				offsets:render.vertex_offsets.ptr()
				withRange:NSMakeRange(first, p_binding_count)];
	}

	render.dirty.clear();
}

void MDCommandBuffer::render_set_viewport(VectorView<Rect2i> p_viewports) {
	render.viewports.resize(p_viewports.size());
	for (uint32_t i = 0; i < p_viewports.size(); i += 1) {
		Rect2i const &vp = p_viewports[i];
		render.viewports[i] = {
			.originX = static_cast<double>(vp.position.x),
			.originY = static_cast<double>(vp.position.y),
			.width = static_cast<double>(vp.size.width),
			.height = static_cast<double>(vp.size.height),
			.znear = 0.0,
			.zfar = 1.0,
		};
	}

	render.dirty.set_flag(RenderState::DIRTY_VIEWPORT);
}

void MDCommandBuffer::render_set_scissor(VectorView<Rect2i> p_scissors) {
	render.scissors.resize(p_scissors.size());
	for (uint32_t i = 0; i < p_scissors.size(); i += 1) {
		Rect2i const &vp = p_scissors[i];
		render.scissors[i] = {
			.x = static_cast<NSUInteger>(vp.position.x),
			.y = static_cast<NSUInteger>(vp.position.y),
			.width = static_cast<NSUInteger>(vp.size.width),
			.height = static_cast<NSUInteger>(vp.size.height),
		};
	}

	render.dirty.set_flag(RenderState::DIRTY_SCISSOR);
}

void MDCommandBuffer::render_set_blend_constants(const Color &p_constants) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	if (render.blend_constants != p_constants) {
		render.blend_constants = p_constants;
		render.dirty.set_flag(RenderState::DIRTY_BLEND);
	}
}
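
// Merges this set's resources into p_dst, keeping each per-usage vector sorted
// by resource pointer and free of duplicates, so the lists passed to
// useResources: when encoding ends stay as small as possible.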
void BoundUniformSet::merge_into(ResourceUsageMap &p_dst) const {
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : usage_to_resources) {
		ResourceVector *resources = p_dst.getptr(keyval.key);
		if (resources == nullptr) {
			resources = &p_dst.insert(keyval.key, ResourceVector())->value;
		}
		// Reserve space for the new resources, assuming they are all added.
		resources->reserve(resources->size() + keyval.value.size());

		uint32_t i = 0, j = 0;
		__unsafe_unretained id<MTLResource> *resources_ptr = resources->ptr();
		const __unsafe_unretained id<MTLResource> *keyval_ptr = keyval.value.ptr();
		// 2-way merge.
		while (i < resources->size() && j < keyval.value.size()) {
			if (resources_ptr[i] < keyval_ptr[j]) {
				i++;
			} else if (resources_ptr[i] > keyval_ptr[j]) {
				resources->insert(i, keyval_ptr[j]);
				i++;
				j++;
			} else {
				i++;
				j++;
			}
		}
		// Append the remaining resources.
		for (; j < keyval.value.size(); j++) {
			resources->push_back(keyval_ptr[j]);
		}
	}
}

void MDCommandBuffer::_render_bind_uniform_sets() {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	if (!render.dirty.has_flag(RenderState::DIRTY_UNIFORMS)) {
		return;
	}

	render.dirty.clear_flag(RenderState::DIRTY_UNIFORMS);
	uint64_t set_uniforms = render.uniform_set_mask;
	render.uniform_set_mask = 0;

	MDRenderShader *shader = render.pipeline->shader;
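	// Visit the set bits of the mask from lowest to highest; e.g. a mask of
	// 0b1010 visits set index 1, then set index 3, and then terminates.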
	while (set_uniforms != 0) {
		// Find the index of the next set bit.
		int index = __builtin_ctzll(set_uniforms);
		// Clear the set bit.
		set_uniforms &= (set_uniforms - 1);
		MDUniformSet *set = render.uniform_sets[index];
		if (set == nullptr || set->index >= (uint32_t)shader->sets.size()) {
			continue;
		}
		set->bind_uniforms(shader, render);
	}
}

void MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, Size2i p_fb_size, VectorView<Rect2i> p_rects) {
	uint32_t idx = 0;
	for (uint32_t i = 0; i < p_rects.size(); i++) {
		Rect2i const &rect = p_rects[i];
		idx = _populate_vertices(p_vertices, idx, rect, p_fb_size);
	}
}
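
// Emits the two clip-space triangles for a single rect, once per view layer.
// For example, a rect covering the left half of the framebuffer yields
// leftPos = -1.0 and rightPos = 0.0; the layer index travels in the vertex's
// w component.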
uint32_t MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, uint32_t p_index, Rect2i const &p_rect, Size2i p_fb_size) {
	// Determine the positions of the four edges of the
	// clear rectangle as a fraction of the attachment size.
	float leftPos = (float)(p_rect.position.x) / (float)p_fb_size.width;
	float rightPos = (float)(p_rect.size.width) / (float)p_fb_size.width + leftPos;
	float bottomPos = (float)(p_rect.position.y) / (float)p_fb_size.height;
	float topPos = (float)(p_rect.size.height) / (float)p_fb_size.height + bottomPos;

	// Transform to clip-space coordinates, which are bounded by (-1.0 < p < 1.0).
	leftPos = (leftPos * 2.0f) - 1.0f;
	rightPos = (rightPos * 2.0f) - 1.0f;
	bottomPos = (bottomPos * 2.0f) - 1.0f;
	topPos = (topPos * 2.0f) - 1.0f;

	simd::float4 vtx;

	uint32_t idx = p_index;
	uint32_t endLayer = render.get_subpass().view_count;

	for (uint32_t layer = 0; layer < endLayer; layer++) {
		vtx.z = 0.0;
		vtx.w = (float)layer;

		// Top left vertex - First triangle.
		vtx.y = topPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;

		// Bottom left vertex.
		vtx.y = bottomPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;

		// Bottom right vertex.
		vtx.y = bottomPos;
		vtx.x = rightPos;
		p_vertices[idx++] = vtx;

		// Bottom right vertex - Second triangle.
		p_vertices[idx++] = vtx;

		// Top right vertex.
		vtx.y = topPos;
		vtx.x = rightPos;
		p_vertices[idx++] = vtx;

		// Top left vertex.
		vtx.y = topPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;
	}

	return idx;
}

void MDCommandBuffer::render_begin_pass(RDD::RenderPassID p_render_pass, RDD::FramebufferID p_frameBuffer, RDD::CommandBufferType p_cmd_buffer_type, const Rect2i &p_rect, VectorView<RDD::RenderPassClearValue> p_clear_values) {
	DEV_ASSERT(commandBuffer != nil);
	end();

	MDRenderPass *pass = (MDRenderPass *)(p_render_pass.id);
	MDFrameBuffer *fb = (MDFrameBuffer *)(p_frameBuffer.id);

	type = MDCommandBufferStateType::Render;
	render.pass = pass;
	render.current_subpass = UINT32_MAX;
	render.render_area = p_rect;
	render.clear_values.resize(p_clear_values.size());
	for (uint32_t i = 0; i < p_clear_values.size(); i++) {
		render.clear_values[i] = p_clear_values[i];
	}
	render.is_rendering_entire_area = (p_rect.position == Point2i(0, 0)) && p_rect.size == fb->size;
	render.frameBuffer = fb;
	render_next_subpass();
}

void MDCommandBuffer::_end_render_pass() {
	MDFrameBuffer const &fb_info = *render.frameBuffer;
	MDSubpass const &subpass = render.get_subpass();
	PixelFormats &pf = device_driver->get_pixel_formats();

	for (uint32_t i = 0; i < subpass.resolve_references.size(); i++) {
		uint32_t color_index = subpass.color_references[i].attachment;
		uint32_t resolve_index = subpass.resolve_references[i].attachment;
		DEV_ASSERT((color_index == RDD::AttachmentReference::UNUSED) == (resolve_index == RDD::AttachmentReference::UNUSED));
		if (color_index == RDD::AttachmentReference::UNUSED || !fb_info.has_texture(color_index)) {
			continue;
		}

		id<MTLTexture> resolve_tex = fb_info.get_texture(resolve_index);

		CRASH_COND_MSG(!flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve), "not implemented: unresolvable texture types");
		// see: https://github.com/KhronosGroup/MoltenVK/blob/d20d13fe2735adb845636a81522df1b9d89c0fba/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm#L407
	}

	render.end_encoding();
}

void MDCommandBuffer::_render_clear_render_area() {
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();

	// First, determine which attachments should be cleared.
	LocalVector<RDD::AttachmentClear> clears;
	clears.reserve(subpass.color_references.size() + /* possible depth stencil clear */ 1);

	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx != RDD::AttachmentReference::UNUSED && pass.attachments[idx].shouldClear(subpass, false)) {
			clears.push_back({ .aspect = RDD::TEXTURE_ASPECT_COLOR_BIT, .color_attachment = idx, .value = render.clear_values[idx] });
		}
	}
	uint32_t ds_index = subpass.depth_stencil_reference.attachment;
	bool shouldClearDepth = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, false));
	bool shouldClearStencil = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, true));

	if (shouldClearDepth || shouldClearStencil) {
		MDAttachment const &attachment = pass.attachments[ds_index];
		BitField<RDD::TextureAspectBits> bits;
		if (shouldClearDepth && attachment.type & MDAttachmentType::Depth) {
			bits.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		}
		if (shouldClearStencil && attachment.type & MDAttachmentType::Stencil) {
			bits.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}

		clears.push_back({ .aspect = bits, .color_attachment = ds_index, .value = render.clear_values[ds_index] });
	}

	if (clears.is_empty()) {
		return;
	}

	render_clear_attachments(clears, { render.render_area });
}

void MDCommandBuffer::render_next_subpass() {
	DEV_ASSERT(commandBuffer != nil);

	if (render.current_subpass == UINT32_MAX) {
		render.current_subpass = 0;
	} else {
		_end_render_pass();
		render.current_subpass++;
	}

	MDFrameBuffer const &fb = *render.frameBuffer;
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();

	MTLRenderPassDescriptor *desc = MTLRenderPassDescriptor.renderPassDescriptor;

	if (subpass.view_count > 1) {
		desc.renderTargetArrayLength = subpass.view_count;
	}

	PixelFormats &pf = device_driver->get_pixel_formats();

	uint32_t attachmentCount = 0;
	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx == RDD::AttachmentReference::UNUSED) {
			continue;
		}

		attachmentCount += 1;
		MTLRenderPassColorAttachmentDescriptor *ca = desc.colorAttachments[i];

		uint32_t resolveIdx = subpass.resolve_references.is_empty() ? RDD::AttachmentReference::UNUSED : subpass.resolve_references[i].attachment;
		bool has_resolve = resolveIdx != RDD::AttachmentReference::UNUSED;
		bool can_resolve = true;
		if (resolveIdx != RDD::AttachmentReference::UNUSED) {
			id<MTLTexture> resolve_tex = fb.get_texture(resolveIdx);
			can_resolve = flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve);
			if (can_resolve) {
				ca.resolveTexture = resolve_tex;
			} else {
				CRASH_NOW_MSG("unimplemented: using a texture format that is not supported for resolve");
			}
		}

		MDAttachment const &attachment = pass.attachments[idx];

		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer color texture is null.");
		if ((attachment.type & MDAttachmentType::Color)) {
			if (attachment.configureDescriptor(ca, pf, subpass, tex, render.is_rendering_entire_area, has_resolve, can_resolve, false)) {
				Color clearColor = render.clear_values[idx].color;
				ca.clearColor = MTLClearColorMake(clearColor.r, clearColor.g, clearColor.b, clearColor.a);
			}
		}
	}

	if (subpass.depth_stencil_reference.attachment != RDD::AttachmentReference::UNUSED) {
		attachmentCount += 1;
		uint32_t idx = subpass.depth_stencil_reference.attachment;
		MDAttachment const &attachment = pass.attachments[idx];

		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer depth / stencil texture is null.");
		if (attachment.type & MDAttachmentType::Depth) {
			MTLRenderPassDepthAttachmentDescriptor *da = desc.depthAttachment;
			if (attachment.configureDescriptor(da, pf, subpass, tex, render.is_rendering_entire_area, false, false, false)) {
				da.clearDepth = render.clear_values[idx].depth;
			}
		}

		if (attachment.type & MDAttachmentType::Stencil) {
			MTLRenderPassStencilAttachmentDescriptor *sa = desc.stencilAttachment;
			if (attachment.configureDescriptor(sa, pf, subpass, tex, render.is_rendering_entire_area, false, false, true)) {
				sa.clearStencil = render.clear_values[idx].stencil;
			}
		}
	}

	desc.renderTargetWidth = MAX((NSUInteger)MIN(render.render_area.position.x + render.render_area.size.width, fb.size.width), 1u);
	desc.renderTargetHeight = MAX((NSUInteger)MIN(render.render_area.position.y + render.render_area.size.height, fb.size.height), 1u);

	if (attachmentCount == 0) {
		// If there are no attachments, defer creating the encoder so that the
		// defaultRasterSampleCount can later be set to match the pipeline's
		// sample count (see bind_pipeline()).
		render.desc = desc;
	} else {
		render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:desc];

		if (!render.is_rendering_entire_area) {
			_render_clear_render_area();
		}
		// With a new encoder, all state is dirty.
		render.dirty.set_flag(RenderState::DIRTY_ALL);
	}
}

void MDCommandBuffer::render_draw(uint32_t p_vertex_count,
		uint32_t p_instance_count,
		uint32_t p_base_vertex,
		uint32_t p_first_instance) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	MDSubpass const &subpass = render.get_subpass();
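	// Multiview is emulated by multiplying the instance count by the view
	// count; the shader recovers the view index from the instance ID and the
	// view range bound in _render_set_dirty_state().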
	if (subpass.view_count > 1) {
		p_instance_count *= subpass.view_count;
	}

	DEV_ASSERT(render.dirty == 0);

	id<MTLRenderCommandEncoder> enc = render.encoder;

	[enc drawPrimitives:render.pipeline->raster_state.render_primitive
			vertexStart:p_base_vertex
			vertexCount:p_vertex_count
			instanceCount:p_instance_count
			baseInstance:p_first_instance];
}

void MDCommandBuffer::render_bind_vertex_buffers(uint32_t p_binding_count, const RDD::BufferID *p_buffers, const uint64_t *p_offsets) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.vertex_buffers.resize(p_binding_count);
	render.vertex_offsets.resize(p_binding_count);

	// Reverse the buffers, as their bindings are assigned in descending order.
	for (uint32_t i = 0; i < p_binding_count; i += 1) {
		render.vertex_buffers[i] = rid::get(p_buffers[p_binding_count - i - 1]);
		render.vertex_offsets[i] = p_offsets[p_binding_count - i - 1];
	}

	if (render.encoder) {
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
				offsets:render.vertex_offsets.ptr()
				withRange:NSMakeRange(first, p_binding_count)];
	} else {
		render.dirty.set_flag(RenderState::DIRTY_VERTEX);
	}
}

void MDCommandBuffer::render_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint64_t p_offset) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.index_buffer = rid::get(p_buffer);
	render.index_type = p_format == RDD::IndexBufferFormat::INDEX_BUFFER_FORMAT_UINT16 ? MTLIndexTypeUInt16 : MTLIndexTypeUInt32;
	render.index_offset = p_offset;
}

void MDCommandBuffer::render_draw_indexed(uint32_t p_index_count,
		uint32_t p_instance_count,
		uint32_t p_first_index,
		int32_t p_vertex_offset,
		uint32_t p_first_instance) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		p_instance_count *= subpass.view_count;
	}

	id<MTLRenderCommandEncoder> enc = render.encoder;
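	// Metal expects the offset of the first index in bytes: 2 bytes per index
	// for UInt16 indices and 4 bytes per index for UInt32 indices.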
	uint32_t index_offset = render.index_offset;
	index_offset += p_first_index * (render.index_type == MTLIndexTypeUInt16 ? sizeof(uint16_t) : sizeof(uint32_t));

	[enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
			indexCount:p_index_count
			indexType:render.index_type
			indexBuffer:render.index_buffer
			indexBufferOffset:index_offset
			instanceCount:p_instance_count
			baseVertex:p_vertex_offset
			baseInstance:p_first_instance];
}

void MDCommandBuffer::render_draw_indexed_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	id<MTLRenderCommandEncoder> enc = render.encoder;
	id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
	NSUInteger indirect_offset = p_offset;
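	// Metal has no multi-draw indirect, so each draw in the batch is issued as
	// its own indirect draw, stepping through the buffer by p_stride bytes.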
	for (uint32_t i = 0; i < p_draw_count; i++) {
		[enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
				indexType:render.index_type
				indexBuffer:render.index_buffer
				indexBufferOffset:0
				indirectBuffer:indirect_buffer
				indirectBufferOffset:indirect_offset];
		indirect_offset += p_stride;
	}
}

void MDCommandBuffer::render_draw_indexed_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}

void MDCommandBuffer::render_draw_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	id<MTLRenderCommandEncoder> enc = render.encoder;
	id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
	NSUInteger indirect_offset = p_offset;

	for (uint32_t i = 0; i < p_draw_count; i++) {
		[enc drawPrimitives:render.pipeline->raster_state.render_primitive
				indirectBuffer:indirect_buffer
				indirectBufferOffset:indirect_offset];
		indirect_offset += p_stride;
	}
}

void MDCommandBuffer::render_draw_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}

void MDCommandBuffer::render_end_pass() {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.end_encoding();
	render.reset();
	type = MDCommandBufferStateType::None;
}

#pragma mark - RenderState

void MDCommandBuffer::RenderState::reset() {
	pass = nil;
	frameBuffer = nil;
	pipeline = nil;
	current_subpass = UINT32_MAX;
	render_area = {};
	is_rendering_entire_area = false;
	desc = nil;
	encoder = nil;
	index_buffer = nil;
	index_type = MTLIndexTypeUInt16;
	dirty = DIRTY_NONE;
	uniform_sets.clear();
	uniform_set_mask = 0;
	clear_values.clear();
	viewports.clear();
	scissors.clear();
	blend_constants.reset();
	vertex_buffers.clear();
	vertex_offsets.clear();
	// Keep the keys, as they are likely to be used again.
	for (KeyValue<StageResourceUsage, LocalVector<__unsafe_unretained id<MTLResource>>> &kv : resource_usage) {
		kv.value.clear();
	}
}

void MDCommandBuffer::RenderState::end_encoding() {
	if (encoder == nil) {
		return;
	}

	// Bind all resources.
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
		if (keyval.value.is_empty()) {
			continue;
		}

		MTLResourceUsage vert_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_VERTEX);
		MTLResourceUsage frag_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_FRAGMENT);
		if (vert_usage == frag_usage) {
			[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex | MTLRenderStageFragment];
		} else {
			if (vert_usage != 0) {
				[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex];
			}
			if (frag_usage != 0) {
				[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:frag_usage stages:MTLRenderStageFragment];
			}
		}
	}

	[encoder endEncoding];
	encoder = nil;
}

#pragma mark - ComputeState

void MDCommandBuffer::ComputeState::end_encoding() {
	if (encoder == nil) {
		return;
	}

	// Bind all resources.
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
		if (keyval.value.is_empty()) {
			continue;
		}

		MTLResourceUsage usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_COMPUTE);
		if (usage != 0) {
			[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:usage];
		}
	}

	[encoder endEncoding];
	encoder = nil;
}

#pragma mark - Compute

void MDCommandBuffer::compute_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MDShader *shader = (MDShader *)(p_shader.id);
	MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
	set->bind_uniforms(shader, compute);
}

void MDCommandBuffer::compute_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MDShader *shader = (MDShader *)(p_shader.id);

	// TODO(sgc): Bind multiple buffers using [encoder setBuffers:offsets:withRange:].
	for (size_t i = 0u; i < p_set_count; ++i) {
		MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
		set->bind_uniforms(shader, compute);
	}
}

void MDCommandBuffer::compute_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);
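	// MTLRegionMake3D is used purely as a container here; only region.size,
	// which carries the threadgroup counts, is passed to the dispatch.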
	MTLRegion region = MTLRegionMake3D(0, 0, 0, p_x_groups, p_y_groups, p_z_groups);

	id<MTLComputeCommandEncoder> enc = compute.encoder;
	[enc dispatchThreadgroups:region.size threadsPerThreadgroup:compute.pipeline->compute_state.local];
}

void MDCommandBuffer::compute_dispatch_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	id<MTLBuffer> indirectBuffer = rid::get(p_indirect_buffer);

	id<MTLComputeCommandEncoder> enc = compute.encoder;
	[enc dispatchThreadgroupsWithIndirectBuffer:indirectBuffer indirectBufferOffset:p_offset threadsPerThreadgroup:compute.pipeline->compute_state.local];
}

void MDCommandBuffer::_end_compute_dispatch() {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	compute.end_encoding();
	compute.reset();
	type = MDCommandBufferStateType::None;
}

void MDCommandBuffer::_end_blit() {
	DEV_ASSERT(type == MDCommandBufferStateType::Blit);

	[blit.encoder endEncoding];
	blit.reset();
	type = MDCommandBufferStateType::None;
}

MDComputeShader::MDComputeShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_uses_argument_buffers,
		MDLibrary *p_kernel) :
		MDShader(p_name, p_sets, p_uses_argument_buffers), kernel(p_kernel) {
}

void MDComputeShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
	DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Compute);
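	// A binding of (uint32_t)-1 indicates the kernel declares no push
	// constants, so there is nothing to encode.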
	if (push_constants.binding == (uint32_t)-1) {
		return;
	}

	id<MTLComputeCommandEncoder> enc = p_cb->compute.encoder;

	void const *ptr = p_data.ptr();
	size_t length = p_data.size() * sizeof(uint32_t);

	[enc setBytes:ptr length:length atIndex:push_constants.binding];
}

MDRenderShader::MDRenderShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_needs_view_mask_buffer,
		bool p_uses_argument_buffers,
		MDLibrary *_Nonnull p_vert, MDLibrary *_Nonnull p_frag) :
		MDShader(p_name, p_sets, p_uses_argument_buffers),
		needs_view_mask_buffer(p_needs_view_mask_buffer),
		vert(p_vert),
		frag(p_frag) {
}

void MDRenderShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
	DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Render);
	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_cb->render.encoder;

	void const *ptr = p_data.ptr();
	size_t length = p_data.size() * sizeof(uint32_t);

	if (push_constants.vert.binding > -1) {
		[enc setVertexBytes:ptr length:length atIndex:push_constants.vert.binding];
	}

	if (push_constants.frag.binding > -1) {
		[enc setFragmentBytes:ptr length:length atIndex:push_constants.frag.binding];
	}
}

void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::RenderState &p_state) {
	DEV_ASSERT(p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	UniformSet const &set_info = p_shader->sets[index];

	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
	id<MTLDevice> __unsafe_unretained device = enc.device;

	BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage);
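	// With argument buffers, binding the whole set collapses into one
	// setBuffer per stage below; bound_uniform_set() also merges the set's
	// resources into p_state.resource_usage so they can be made resident with
	// useResources: when encoding ends.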
	// Set the buffer for the vertex stage.
	{
		uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_VERTEX);
		if (offset) {
			[enc setVertexBuffer:bus.buffer offset:*offset atIndex:index];
		}
	}
	// Set the buffer for the fragment stage.
	{
		uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_FRAGMENT);
		if (offset) {
			[enc setFragmentBuffer:bus.buffer offset:*offset atIndex:index];
		}
	}
}

void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::RenderState &p_state) {
	DEV_ASSERT(!p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;

	UniformSet const &set = p_shader->sets[index];

	for (uint32_t i = 0; i < uniforms.size(); i++) {
		RDD::BoundUniform const &uniform = uniforms[i];
		UniformInfo ui = set.uniforms[i];

		static const RDC::ShaderStage stage_usages[2] = { RDC::ShaderStage::SHADER_STAGE_VERTEX, RDC::ShaderStage::SHADER_STAGE_FRAGMENT };
		for (const RDC::ShaderStage stage : stage_usages) {
			ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);

			BindingInfo *bi = ui.bindings.getptr(stage);
			if (bi == nullptr) {
				// No binding for this stage.
				continue;
			}

			if ((ui.active_stages & stage_usage) == 0) {
				// Not active for this stage, so don't bind anything.
  911. continue;
  912. }
  913. switch (uniform.type) {
  914. case RDD::UNIFORM_TYPE_SAMPLER: {
  915. size_t count = uniform.ids.size();
  916. id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
  917. for (size_t j = 0; j < count; j += 1) {
  918. objects[j] = rid::get(uniform.ids[j].id);
  919. }
  920. if (stage == RDD::SHADER_STAGE_VERTEX) {
  921. [enc setVertexSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
  922. } else {
  923. [enc setFragmentSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
  924. }
  925. } break;
  926. case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
  927. size_t count = uniform.ids.size() / 2;
  928. id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
  929. id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
  930. for (uint32_t j = 0; j < count; j += 1) {
  931. id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
  932. id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
  933. samplers[j] = sampler;
  934. textures[j] = texture;
  935. }
  936. BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
  937. if (sbi) {
  938. if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						} else {
							[enc setFragmentSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
					}
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexTextures:textures withRange:NSMakeRange(bi->index, count)];
					} else {
						[enc setFragmentTextures:textures withRange:NSMakeRange(bi->index, count)];
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_IMAGE: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
						BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
							id<MTLBuffer> buf = tex.buffer;
							if (buf) {
								if (stage == RDD::SHADER_STAGE_VERTEX) {
									[enc setVertexBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								} else {
									[enc setFragmentBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
					CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				default: {
					DEV_ASSERT(false);
				}
			}
		}
	}
}
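// Binds this uniform set for a render pass, dispatching on how the shader was
// compiled: sets that use Metal argument buffers are bound as a single buffer,
// while all other sets have each resource bound individually on the encoder.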
void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::RenderState &p_state) {
	if (p_shader->uses_argument_buffers) {
		bind_uniforms_argument_buffers(p_shader, p_state);
	} else {
		bind_uniforms_direct(p_shader, p_state);
	}
}
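// Compute variant of the argument-buffer path: the encoded argument buffer is
// built, or fetched from the per-shader cache, by bound_uniform_set(), then
// bound at the compute stage's offset for this set index.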
void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state) {
	DEV_ASSERT(p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	UniformSet const &set_info = p_shader->sets[index];

	id<MTLComputeCommandEncoder> enc = p_state.encoder;
	id<MTLDevice> device = enc.device;

	BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage);

	uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_COMPUTE);
	if (offset) {
		[enc setBuffer:bus.buffer offset:*offset atIndex:index];
	}
}
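// Compute variant of the direct-binding path. Unlike the render variant, only
// the compute stage needs to be considered, so resources are bound with the
// stage-agnostic encoder methods (setTexture:, setBuffer:, and so on).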
void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state) {
	DEV_ASSERT(!p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	id<MTLComputeCommandEncoder> __unsafe_unretained enc = p_state.encoder;

	UniformSet const &set = p_shader->sets[index];

	for (uint32_t i = 0; i < uniforms.size(); i++) {
		RDD::BoundUniform const &uniform = uniforms[i];
		UniformInfo ui = set.uniforms[i];

		const RDC::ShaderStage stage = RDC::ShaderStage::SHADER_STAGE_COMPUTE;
		const ShaderStageUsage stage_usage = ShaderStageUsage(1 << stage);

		BindingInfo *bi = ui.bindings.getptr(stage);
		if (bi == nullptr) {
			// No binding for this stage.
			continue;
		}
		if ((ui.active_stages & stage_usage) == 0) {
			// Not active for this stage, so don't bind anything.
			continue;
		}
		switch (uniform.type) {
			case RDD::UNIFORM_TYPE_SAMPLER: {
				size_t count = uniform.ids.size();
				id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (size_t j = 0; j < count; j += 1) {
					objects[j] = rid::get(uniform.ids[j].id);
				}
				[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				size_t count = uniform.ids.size() / 2;
				id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
				id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (uint32_t j = 0; j < count; j += 1) {
					id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
					id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
					samplers[j] = sampler;
					textures[j] = texture;
				}
				BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
				if (sbi) {
					[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
				}
				[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_IMAGE: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
					BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
						id<MTLBuffer> buf = tex.buffer;
						if (buf) {
							[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
						}
					}
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
				CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			default: {
				DEV_ASSERT(false);
			}
		}
	}
}
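// Binds this uniform set for a compute dispatch, choosing between the
// argument-buffer and direct-binding paths exactly as the render variant does.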
void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state) {
	if (p_shader->uses_argument_buffers) {
		bind_uniforms_argument_buffers(p_shader, p_state);
	} else {
		bind_uniforms_direct(p_shader, p_state);
	}
}
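// Returns the cached BoundUniformSet for p_shader, building it on first use.
// Building encodes every uniform into a freshly allocated argument buffer and
// records which resources each stage reads or writes; the per-usage resource
// lists are kept sorted so the indirectly referenced resources can later be
// made resident in batches. On every call, including cache hits, the set's
// usage information is merged into p_resource_usage.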
BoundUniformSet &MDUniformSet::bound_uniform_set(MDShader *p_shader, id<MTLDevice> p_device, ResourceUsageMap &p_resource_usage) {
	BoundUniformSet *sus = bound_uniforms.getptr(p_shader);
	if (sus != nullptr) {
		sus->merge_into(p_resource_usage);
		return *sus;
	}

	UniformSet const &set = p_shader->sets[index];

	HashMap<id<MTLResource>, StageResourceUsage> bound_resources;
	auto add_usage = [&bound_resources](id<MTLResource> __unsafe_unretained res, RDD::ShaderStage stage, MTLResourceUsage usage) {
		StageResourceUsage *sru = bound_resources.getptr(res);
		if (sru == nullptr) {
			bound_resources.insert(res, stage_resource_usage(stage, usage));
		} else {
			*sru |= stage_resource_usage(stage, usage);
		}
	};

	id<MTLBuffer> enc_buffer = nil;
	if (set.buffer_size > 0) {
		MTLResourceOptions options = MTLResourceStorageModeShared | MTLResourceHazardTrackingModeTracked;
		enc_buffer = [p_device newBufferWithLength:set.buffer_size options:options];
		for (KeyValue<RDC::ShaderStage, id<MTLArgumentEncoder>> const &kv : set.encoders) {
			RDD::ShaderStage const stage = kv.key;
			ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
			id<MTLArgumentEncoder> const enc = kv.value;

			[enc setArgumentBuffer:enc_buffer offset:set.offsets[stage]];

			for (uint32_t i = 0; i < uniforms.size(); i++) {
				RDD::BoundUniform const &uniform = uniforms[i];
				UniformInfo ui = set.uniforms[i];

				BindingInfo *bi = ui.bindings.getptr(stage);
				if (bi == nullptr) {
					// No binding for this stage.
					continue;
				}
				if ((ui.active_stages & stage_usage) == 0) {
					// Not active for this stage, so don't bind anything.
					continue;
				}
				switch (uniform.type) {
					case RDD::UNIFORM_TYPE_SAMPLER: {
						size_t count = uniform.ids.size();
						id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							objects[j] = rid::get(uniform.ids[j].id);
						}
						[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
						size_t count = uniform.ids.size() / 2;
						id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (uint32_t j = 0; j < count; j += 1) {
							id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
							id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
							samplers[j] = sampler;
							textures[j] = texture;
							add_usage(texture, stage, bi->usage);
						}
						BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
						[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_IMAGE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
							BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
							if (sbi) {
								id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
								id<MTLBuffer> buf = tex.buffer;
								if (buf) {
									[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
						CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					default: {
						DEV_ASSERT(false);
					}
				}
			}
		}
	}

	SearchArray<__unsafe_unretained id<MTLResource>> search;
	ResourceUsageMap usage_to_resources;
	for (KeyValue<id<MTLResource>, StageResourceUsage> const &keyval : bound_resources) {
		ResourceVector *resources = usage_to_resources.getptr(keyval.value);
		if (resources == nullptr) {
			resources = &usage_to_resources.insert(keyval.value, ResourceVector())->value;
		}
		int64_t pos = search.bisect(resources->ptr(), resources->size(), keyval.key, true);
		if (pos == resources->size() || (*resources)[pos] != keyval.key) {
			resources->insert(pos, keyval.key);
		}
	}

	BoundUniformSet bs = { .buffer = enc_buffer, .usage_to_resources = usage_to_resources };
	bound_uniforms.insert(p_shader, bs);

	bs.merge_into(p_resource_usage);

	return bound_uniforms.get(p_shader);
}
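// Computes the format capabilities an attachment needs for this subpass, based
// on how the subpass references it: as an input, color, resolve, or
// depth/stencil attachment.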
MTLFmtCaps MDSubpass::getRequiredFmtCapsForAttachmentAt(uint32_t p_index) const {
	MTLFmtCaps caps = kMTLFmtCapsNone;

	for (RDD::AttachmentReference const &ar : input_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsRead);
			break;
		}
	}

	for (RDD::AttachmentReference const &ar : color_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsColorAtt);
			break;
		}
	}

	for (RDD::AttachmentReference const &ar : resolve_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsResolve);
			break;
		}
	}

	if (depth_stencil_reference.attachment == p_index) {
		flags::set(caps, kMTLFmtCapsDSAtt);
	}

	return caps;
}
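// Records the first and last subpasses that reference this attachment, which
// later determines when it must be loaded and when its contents can be stored
// or discarded.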
void MDAttachment::linkToSubpass(const MDRenderPass &p_pass) {
	firstUseSubpassIndex = UINT32_MAX;
	lastUseSubpassIndex = 0;

	for (MDSubpass const &subpass : p_pass.subpasses) {
		MTLFmtCaps reqCaps = subpass.getRequiredFmtCapsForAttachmentAt(index);
		if (reqCaps) {
			firstUseSubpassIndex = MIN(subpass.subpass_index, firstUseSubpassIndex);
			lastUseSubpassIndex = MAX(subpass.subpass_index, lastUseSubpassIndex);
		}
	}
}
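// Resolves the MTLStoreAction for this attachment in p_subpass. Contents must
// be stored when rendering only part of the attachment or when a later subpass
// still uses it; otherwise the configured store action applies, upgraded to a
// multisample resolve when a resolve attachment is present and resolving is
// possible.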
MTLStoreAction MDAttachment::getMTLStoreAction(MDSubpass const &p_subpass,
		bool p_is_rendering_entire_area,
		bool p_has_resolve,
		bool p_can_resolve,
		bool p_is_stencil) const {
	if (!p_is_rendering_entire_area || !isLastUseOf(p_subpass)) {
		return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
	}

	switch (p_is_stencil ? stencilStoreAction : storeAction) {
		case MTLStoreActionStore:
			return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
		case MTLStoreActionDontCare:
			return p_has_resolve ? (p_can_resolve ? MTLStoreActionMultisampleResolve : MTLStoreActionStore) : MTLStoreActionDontCare;
		default:
			return MTLStoreActionStore;
	}
}
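// Fills in a render pass attachment descriptor for this attachment and returns
// whether the chosen load action clears it, so the caller knows a clear value
// is required.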
bool MDAttachment::configureDescriptor(MTLRenderPassAttachmentDescriptor *p_desc,
		PixelFormats &p_pf,
		MDSubpass const &p_subpass,
		id<MTLTexture> p_attachment,
		bool p_is_rendering_entire_area,
		bool p_has_resolve,
		bool p_can_resolve,
		bool p_is_stencil) const {
	p_desc.texture = p_attachment;

	MTLLoadAction load;
	if (!p_is_rendering_entire_area || !isFirstUseOf(p_subpass)) {
		load = MTLLoadActionLoad;
	} else {
		load = p_is_stencil ? stencilLoadAction : loadAction;
	}
	p_desc.loadAction = load;

	MTLPixelFormat mtlFmt = p_attachment.pixelFormat;
	bool isDepthFormat = p_pf.isDepthFormat(mtlFmt);
	bool isStencilFormat = p_pf.isStencilFormat(mtlFmt);
	if (isStencilFormat && !p_is_stencil && !isDepthFormat) {
		p_desc.storeAction = MTLStoreActionDontCare;
	} else {
		p_desc.storeAction = getMTLStoreAction(p_subpass, p_is_rendering_entire_area, p_has_resolve, p_can_resolve, p_is_stencil);
	}

	return load == MTLLoadActionClear;
}

bool MDAttachment::shouldClear(const MDSubpass &p_subpass, bool p_is_stencil) const {
	// If the subpass is not the first subpass to use this attachment, don't clear this attachment.
	if (p_subpass.subpass_index != firstUseSubpassIndex) {
		return false;
	}
	return (p_is_stencil ? stencilLoadAction : loadAction) == MTLLoadActionClear;
}
MDRenderPass::MDRenderPass(Vector<MDAttachment> &p_attachments, Vector<MDSubpass> &p_subpasses) :
		attachments(p_attachments), subpasses(p_subpasses) {
	for (MDAttachment &att : attachments) {
		att.linkToSubpass(*this);
	}
}

#pragma mark - Resource Factory
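// Compiles p_source as a Metal library and returns the function named p_name.
// Compilation errors are reported through p_error when a location to receive
// them is provided.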
id<MTLFunction> MDResourceFactory::new_func(NSString *p_source, NSString *p_name, NSError **p_error) {
	@autoreleasepool {
		NSError *err = nil;
		MTLCompileOptions *options = [MTLCompileOptions new];
		id<MTLDevice> device = device_driver->get_device();
		id<MTLLibrary> mtlLib = [device newLibraryWithSource:p_source
				options:options
				error:&err];
		if (err) {
			if (p_error != nil) {
				*p_error = err;
			}
		}
		return [mtlLib newFunctionWithName:p_name];
	}
}
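// Generates the vertex function for the clear pipeline. The MSL source is
// produced with stringWithFormat:; the layer output only receives the
// [[render_target_array_index]] attribute when layered rendering is enabled,
// and the vertex depth is sourced from the clear-color slot reserved for it
// (ClearAttKey::DEPTH_INDEX).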
id<MTLFunction> MDResourceFactory::new_clear_vert_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSString *msl = [NSString stringWithFormat:@R"(
#include <metal_stdlib>
using namespace metal;

typedef struct {
	float4 a_position [[attribute(0)]];
} AttributesPos;

typedef struct {
	float4 colors[9];
} ClearColorsIn;

typedef struct {
	float4 v_position [[position]];
	uint layer%s;
} VaryingsPos;

vertex VaryingsPos vertClear(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
	VaryingsPos varyings;
	varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[%d].r, 1.0);
	varyings.layer = uint(attributes.a_position.w);
	return varyings;
}
)", p_key.is_layered_rendering_enabled() ? " [[render_target_array_index]]" : "", ClearAttKey::DEPTH_INDEX];

		return new_func(msl, @"vertClear", nil);
	}
}
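// Generates the fragment function for the clear pipeline. One color output is
// declared per enabled attachment, typed to match that attachment's pixel
// format, and each is written from the corresponding slot of the clear-color
// array.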
id<MTLFunction> MDResourceFactory::new_clear_frag_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSMutableString *msl = [NSMutableString stringWithCapacity:2048];

		[msl appendFormat:@R"(
#include <metal_stdlib>
using namespace metal;

typedef struct {
	float4 v_position [[position]];
} VaryingsPos;

typedef struct {
	float4 colors[9];
} ClearColorsIn;

typedef struct {
)"];

		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" %@4 color%u [[color(%u)]];\n", typeStr, caIdx, caIdx];
			}
		}

		[msl appendFormat:@R"(} ClearColorsOut;

fragment ClearColorsOut fragClear(VaryingsPos varyings [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
	ClearColorsOut ccOut;
)"];

		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" ccOut.color%u = %@4(ccIn.colors[%u]);\n", caIdx, typeStr, caIdx];
			}
		}

		[msl appendString:@R"( return ccOut;
})"];

		return new_func(msl, @"fragClear", nil);
	}
}
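// Maps a pixel format to the MSL scalar type used for the clear shader's color
// outputs, so that integer formats are written with matching integer vector
// types.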
NSString *MDResourceFactory::get_format_type_string(MTLPixelFormat p_fmt) {
	switch (device_driver->get_pixel_formats().getFormatType(p_fmt)) {
		case MTLFormatType::ColorInt8:
		case MTLFormatType::ColorInt16:
			return @"short";
		case MTLFormatType::ColorUInt8:
		case MTLFormatType::ColorUInt16:
			return @"ushort";
		case MTLFormatType::ColorInt32:
			return @"int";
		case MTLFormatType::ColorUInt32:
			return @"uint";
		case MTLFormatType::ColorHalf:
			return @"half";
		case MTLFormatType::ColorFloat:
		case MTLFormatType::DepthStencil:
		case MTLFormatType::Compressed:
			return @"float";
		case MTLFormatType::None:
			return @"unexpected_MTLPixelFormatInvalid";
	}
}
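// Creates a depth/stencil state for the clear pipeline: depth writes are
// enabled when clearing depth, and a replace-on-pass stencil descriptor is
// attached when clearing stencil.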
id<MTLDepthStencilState> MDResourceFactory::new_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
	MTLDepthStencilDescriptor *dsDesc = [MTLDepthStencilDescriptor new];
	dsDesc.depthCompareFunction = MTLCompareFunctionAlways;
	dsDesc.depthWriteEnabled = p_use_depth;

	if (p_use_stencil) {
		MTLStencilDescriptor *sDesc = [MTLStencilDescriptor new];
		sDesc.stencilCompareFunction = MTLCompareFunctionAlways;
		sDesc.stencilFailureOperation = MTLStencilOperationReplace;
		sDesc.depthFailureOperation = MTLStencilOperationReplace;
		sDesc.depthStencilPassOperation = MTLStencilOperationReplace;

		dsDesc.frontFaceStencil = sDesc;
		dsDesc.backFaceStencil = sDesc;
	} else {
		dsDesc.frontFaceStencil = nil;
		dsDesc.backFaceStencil = nil;
	}

	return [device_driver->get_device() newDepthStencilStateWithDescriptor:dsDesc];
}
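// Builds the render pipeline used to clear attachments: the generated clear
// vertex and fragment functions, one color attachment per slot (write-masked
// off when not being cleared), matching depth/stencil formats, and a single
// float4 per-vertex position attribute.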
id<MTLRenderPipelineState> MDResourceFactory::new_clear_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
	PixelFormats &pixFmts = device_driver->get_pixel_formats();

	id<MTLFunction> vtxFunc = new_clear_vert_func(p_key);
	id<MTLFunction> fragFunc = new_clear_frag_func(p_key);
	MTLRenderPipelineDescriptor *plDesc = [MTLRenderPipelineDescriptor new];
	plDesc.label = @"ClearRenderAttachments";
	plDesc.vertexFunction = vtxFunc;
	plDesc.fragmentFunction = fragFunc;
	plDesc.rasterSampleCount = p_key.sample_count;
	plDesc.inputPrimitiveTopology = MTLPrimitiveTopologyClassTriangle;

	for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
		MTLRenderPipelineColorAttachmentDescriptor *colorDesc = plDesc.colorAttachments[caIdx];
		colorDesc.pixelFormat = (MTLPixelFormat)p_key.pixel_formats[caIdx];
		colorDesc.writeMask = p_key.is_enabled(caIdx) ? MTLColorWriteMaskAll : MTLColorWriteMaskNone;
	}

	MTLPixelFormat mtlDepthFormat = p_key.depth_format();
	if (pixFmts.isDepthFormat(mtlDepthFormat)) {
		plDesc.depthAttachmentPixelFormat = mtlDepthFormat;
	}

	MTLPixelFormat mtlStencilFormat = p_key.stencil_format();
	if (pixFmts.isStencilFormat(mtlStencilFormat)) {
		plDesc.stencilAttachmentPixelFormat = mtlStencilFormat;
	}

	MTLVertexDescriptor *vtxDesc = plDesc.vertexDescriptor;

	// Vertex attribute descriptors.
	MTLVertexAttributeDescriptorArray *vaDescArray = vtxDesc.attributes;
	MTLVertexAttributeDescriptor *vaDesc;
	NSUInteger vtxBuffIdx = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX);
	NSUInteger vtxStride = 0;

	// Vertex location.
	vaDesc = vaDescArray[0];
	vaDesc.format = MTLVertexFormatFloat4;
	vaDesc.bufferIndex = vtxBuffIdx;
	vaDesc.offset = vtxStride;
	vtxStride += sizeof(simd::float4);

	// Vertex attribute buffer.
	MTLVertexBufferLayoutDescriptorArray *vbDescArray = vtxDesc.layouts;
	MTLVertexBufferLayoutDescriptor *vbDesc = vbDescArray[vtxBuffIdx];
	vbDesc.stepFunction = MTLVertexStepFunctionPerVertex;
	vbDesc.stepRate = 1;
	vbDesc.stride = vtxStride;

	return [device_driver->get_device() newRenderPipelineStateWithDescriptor:plDesc error:p_error];
}
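// Returns a cached clear pipeline for the given attachment key, creating and
// memoizing it on a cache miss.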
id<MTLRenderPipelineState> MDResourceCache::get_clear_render_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
	auto it = clear_states.find(p_key);
	if (it != clear_states.end()) {
		return it->value;
	}

	id<MTLRenderPipelineState> state = resource_factory->new_clear_pipeline_state(p_key, p_error);
	clear_states[p_key] = state;
	return state;
}
id<MTLDepthStencilState> MDResourceCache::get_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
	id<MTLDepthStencilState> __strong *val;
	if (p_use_depth && p_use_stencil) {
		val = &clear_depth_stencil_state.all;
	} else if (p_use_depth) {
		val = &clear_depth_stencil_state.depth_only;
	} else if (p_use_stencil) {
		val = &clear_depth_stencil_state.stencil_only;
	} else {
		val = &clear_depth_stencil_state.none;
	}
	DEV_ASSERT(val != nullptr);

	if (*val == nil) {
		*val = resource_factory->new_depth_stencil_state(p_use_depth, p_use_stencil);
	}
	return *val;
}
static const char *SHADER_STAGE_NAMES[] = {
	[RD::SHADER_STAGE_VERTEX] = "vert",
	[RD::SHADER_STAGE_FRAGMENT] = "frag",
	[RD::SHADER_STAGE_TESSELATION_CONTROL] = "tess_ctrl",
	[RD::SHADER_STAGE_TESSELATION_EVALUATION] = "tess_eval",
	[RD::SHADER_STAGE_COMPUTE] = "comp",
};

void ShaderCacheEntry::notify_free() const {
	owner.shader_cache_free_entry(key);
}
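// MDLibrary's factory method returns one of two concrete subclasses:
// MDImmediateLibrary, which kicks off compilation asynchronously as soon as it
// is created, and MDLazyLibrary, which defers compilation until the library or
// its error is first requested.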
@interface MDLibrary ()
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry;
@end

/// Loads the MTLLibrary when the library is first accessed.
@interface MDLazyLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::shared_mutex _mu;
	bool _loaded;
	id<MTLDevice> _device;
	NSString *_source;
	MTLCompileOptions *_options;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options;
@end

/// Loads the MTLLibrary immediately on initialization, using an asynchronous API.
@interface MDImmediateLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::mutex _cv_mutex;
	std::condition_variable _cv;
	std::atomic<bool> _complete;
	bool _ready;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options;
@end

@implementation MDLibrary

+ (instancetype)newLibraryWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options
		strategy:(ShaderLoadStrategy)strategy {
	switch (strategy) {
		case ShaderLoadStrategy::DEFAULT:
			[[fallthrough]];
		default:
			return [[MDImmediateLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
		case ShaderLoadStrategy::LAZY:
			return [[MDLazyLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
	}
}

- (id<MTLLibrary>)library {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

- (NSError *)error {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

- (void)setLabel:(NSString *)label {
}

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry {
	self = [super init];
	_entry = entry;
	_entry->library = self;
	return self;
}

- (void)dealloc {
	_entry->notify_free();
}

@end
@implementation MDImmediateLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry];
	_complete = false;
	_ready = false;

	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			entry->name.get_data(), SHADER_STAGE_NAMES[entry->stage], entry->key.short_sha());

	[device newLibraryWithSource:source
			options:options
			completionHandler:^(id<MTLLibrary> library, NSError *error) {
				os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");
				self->_library = library;
				self->_error = error;
				if (error) {
					ERR_PRINT(vformat(U"Error compiling shader %s: %s", entry->name.get_data(), error.localizedDescription.UTF8String));
				}
				{
					std::lock_guard<std::mutex> lock(self->_cv_mutex);
					self->_ready = true;
				}
				self->_cv.notify_all();
				self->_complete = true;
			}];
	return self;
}
- (id<MTLLibrary>)library {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _library;
}

- (NSError *)error {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _error;
}

@end
@implementation MDLazyLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry];
	_device = device;
	_source = source;
	_options = options;
	return self;
}
- (void)load {
	// Double-checked locking: take the shared lock for the fast path, then
	// re-check under the exclusive lock before compiling.
	{
		std::shared_lock<std::shared_mutex> lock(_mu);
		if (_loaded) {
			return;
		}
	}

	std::unique_lock<std::shared_mutex> lock(_mu);
	if (_loaded) {
		return;
	}

	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			_entry->name.get_data(), SHADER_STAGE_NAMES[_entry->stage], _entry->key.short_sha());
	NSError *error = nil;
	_library = [_device newLibraryWithSource:_source options:_options error:&error];
	// Store the compile error so the error accessor can report it.
	_error = error;
	os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");

	// Release the compilation inputs now that the library is built.
	_device = nil;
	_source = nil;
	_options = nil;
	_loaded = true;
}
- (id<MTLLibrary>)library {
	[self load];
	return _library;
}

- (NSError *)error {
	[self load];
	return _error;
}

@end