/**************************************************************************/
/*  metal_objects.mm                                                      */
/**************************************************************************/
/*                         This file is part of:                          */
/*                             GODOT ENGINE                               */
/*                        https://godotengine.org                         */
/**************************************************************************/
/* Copyright (c) 2014-present Godot Engine contributors (see AUTHORS.md). */
/* Copyright (c) 2007-2014 Juan Linietsky, Ariel Manzur.                  */
/*                                                                        */
/* Permission is hereby granted, free of charge, to any person obtaining  */
/* a copy of this software and associated documentation files (the        */
/* "Software"), to deal in the Software without restriction, including    */
/* without limitation the rights to use, copy, modify, merge, publish,    */
/* distribute, sublicense, and/or sell copies of the Software, and to     */
/* permit persons to whom the Software is furnished to do so, subject to  */
/* the following conditions:                                              */
/*                                                                        */
/* The above copyright notice and this permission notice shall be         */
/* included in all copies or substantial portions of the Software.        */
/*                                                                        */
/* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,        */
/* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF     */
/* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. */
/* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY   */
/* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   */
/* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE      */
/* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.                 */
/**************************************************************************/

/**************************************************************************/
/*                                                                        */
/* Portions of this code were derived from MoltenVK.                      */
/*                                                                        */
/* Copyright (c) 2015-2023 The Brenwill Workshop Ltd.                     */
/* (http://www.brenwill.com)                                              */
/*                                                                        */
/* Licensed under the Apache License, Version 2.0 (the "License");        */
/* you may not use this file except in compliance with the License.       */
/* You may obtain a copy of the License at                                */
/*                                                                        */
/*     http://www.apache.org/licenses/LICENSE-2.0                         */
/*                                                                        */
/* Unless required by applicable law or agreed to in writing, software    */
/* distributed under the License is distributed on an "AS IS" BASIS,      */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or        */
/* implied. See the License for the specific language governing           */
/* permissions and limitations under the License.                         */
/**************************************************************************/

#import "metal_objects.h"
#import "metal_utils.h"
#import "pixel_formats.h"
#import "rendering_device_driver_metal.h"

#import <os/signpost.h>

// We have to undefine these macros because they are defined in NSObjCRuntime.h.
#undef MIN
#undef MAX

void MDCommandBuffer::begin() {
	DEV_ASSERT(commandBuffer == nil);
	commandBuffer = queue.commandBuffer;
}

void MDCommandBuffer::end() {
	switch (type) {
		case MDCommandBufferStateType::None:
			return;
		case MDCommandBufferStateType::Render:
			return render_end_pass();
		case MDCommandBufferStateType::Compute:
			return _end_compute_dispatch();
		case MDCommandBufferStateType::Blit:
			return _end_blit();
	}
}

void MDCommandBuffer::commit() {
	end();
	[commandBuffer commit];
	commandBuffer = nil;
}

void MDCommandBuffer::bind_pipeline(RDD::PipelineID p_pipeline) {
	MDPipeline *p = (MDPipeline *)(p_pipeline.id);

	// End the current encoder if it is a compute or blit encoder, as those
	// do not have a defined end boundary in the RDD, unlike a render pass.
	if (type == MDCommandBufferStateType::Compute) {
		_end_compute_dispatch();
	} else if (type == MDCommandBufferStateType::Blit) {
		_end_blit();
	}

	if (p->type == MDPipelineType::Render) {
		DEV_ASSERT(type == MDCommandBufferStateType::Render);
		MDRenderPipeline *rp = (MDRenderPipeline *)p;

		if (render.encoder == nil) {
			// This error would happen if the render pass failed.
			ERR_FAIL_NULL_MSG(render.desc, "Render pass descriptor is null.");

			// This condition occurs when there are no attachments when calling render_next_subpass()
			// and is due to the SUPPORTS_FRAGMENT_SHADER_WITH_ONLY_SIDE_EFFECTS flag.
			render.desc.defaultRasterSampleCount = static_cast<NSUInteger>(rp->sample_count);

			// NOTE(sgc): This is to test rdar://FB13605547 and will be deleted once fix is confirmed.
#if 0
			if (render.pipeline->sample_count == 4) {
				static id<MTLTexture> tex = nil;
				static id<MTLTexture> res_tex = nil;
				static dispatch_once_t onceToken;
				dispatch_once(&onceToken, ^{
					Size2i sz = render.frameBuffer->size;
					MTLTextureDescriptor *td = [MTLTextureDescriptor texture2DDescriptorWithPixelFormat:MTLPixelFormatRGBA8Unorm width:sz.width height:sz.height mipmapped:NO];
					td.textureType = MTLTextureType2DMultisample;
					td.storageMode = MTLStorageModeMemoryless;
					td.usage = MTLTextureUsageRenderTarget;
					td.sampleCount = render.pipeline->sample_count;
					tex = [device_driver->get_device() newTextureWithDescriptor:td];

					td.textureType = MTLTextureType2D;
					td.storageMode = MTLStorageModePrivate;
					td.usage = MTLTextureUsageShaderWrite;
					td.sampleCount = 1;
					res_tex = [device_driver->get_device() newTextureWithDescriptor:td];
				});

				render.desc.colorAttachments[0].texture = tex;
				render.desc.colorAttachments[0].loadAction = MTLLoadActionClear;
				render.desc.colorAttachments[0].storeAction = MTLStoreActionMultisampleResolve;
				render.desc.colorAttachments[0].resolveTexture = res_tex;
			}
#endif

			render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:render.desc];
		}

		if (render.pipeline != rp) {
			render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_RASTER));
			// Mark all uniforms as dirty, as variants of a shader pipeline may have a different entry point ABI,
			// due to setting force_active_argument_buffer_resources = true for spirv_cross::CompilerMSL::Options.
			// As a result, uniform sets with the same layout will generate redundant binding warnings when
			// capturing a Metal frame in Xcode.
			//
			// If we don't mark as dirty, then some bindings will generate a validation error.
			render.mark_uniforms_dirty();
			if (render.pipeline != nullptr && render.pipeline->depth_stencil != rp->depth_stencil) {
				render.dirty.set_flag(RenderState::DIRTY_DEPTH);
			}
			if (rp->raster_state.blend.enabled) {
				render.dirty.set_flag(RenderState::DIRTY_BLEND);
			}
			render.pipeline = rp;
		}
	} else if (p->type == MDPipelineType::Compute) {
		DEV_ASSERT(type == MDCommandBufferStateType::None);
		type = MDCommandBufferStateType::Compute;

		compute.pipeline = (MDComputePipeline *)p;
		compute.encoder = commandBuffer.computeCommandEncoder;
		[compute.encoder setComputePipelineState:compute.pipeline->state];
	}
}

id<MTLBlitCommandEncoder> MDCommandBuffer::blit_command_encoder() {
	switch (type) {
		case MDCommandBufferStateType::None:
			break;
		case MDCommandBufferStateType::Render:
			render_end_pass();
			break;
		case MDCommandBufferStateType::Compute:
			_end_compute_dispatch();
			break;
		case MDCommandBufferStateType::Blit:
			return blit.encoder;
	}

	type = MDCommandBufferStateType::Blit;
	blit.encoder = commandBuffer.blitCommandEncoder;
	return blit.encoder;
}

void MDCommandBuffer::encodeRenderCommandEncoderWithDescriptor(MTLRenderPassDescriptor *p_desc, NSString *p_label) {
	switch (type) {
		case MDCommandBufferStateType::None:
			break;
		case MDCommandBufferStateType::Render:
			render_end_pass();
			break;
		case MDCommandBufferStateType::Compute:
			_end_compute_dispatch();
			break;
		case MDCommandBufferStateType::Blit:
			_end_blit();
			break;
	}

	id<MTLRenderCommandEncoder> enc = [commandBuffer renderCommandEncoderWithDescriptor:p_desc];
	if (p_label != nil) {
		[enc pushDebugGroup:p_label];
		[enc popDebugGroup];
	}
	[enc endEncoding];
}
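
// NOTE: The helper above creates an encoder and immediately ends it without
// recording any commands. Encoding an empty pass like this is presumably a
// way to make Metal execute the descriptor's load and store actions (e.g. a
// clear) on their own; the optional debug group only labels the pass for
// frame captures.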

#pragma mark - Render Commands

void MDCommandBuffer::render_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
	if (render.uniform_sets.size() <= set->index) {
		uint32_t s = render.uniform_sets.size();
		render.uniform_sets.resize(set->index + 1);
		// Set intermediate values to null.
		std::fill(&render.uniform_sets[s], &render.uniform_sets[set->index] + 1, nullptr);
	}

	if (render.uniform_sets[set->index] != set) {
		render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
		render.uniform_set_mask |= 1ULL << set->index;
		render.uniform_sets[set->index] = set;
	}
}

void MDCommandBuffer::render_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	for (size_t i = 0u; i < p_set_count; ++i) {
		MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
		if (render.uniform_sets.size() <= set->index) {
			uint32_t s = render.uniform_sets.size();
			render.uniform_sets.resize(set->index + 1);
			// Set intermediate values to null.
			std::fill(&render.uniform_sets[s], &render.uniform_sets[set->index] + 1, nullptr);
		}

		if (render.uniform_sets[set->index] != set) {
			render.dirty.set_flag(RenderState::DIRTY_UNIFORMS);
			render.uniform_set_mask |= 1ULL << set->index;
			render.uniform_sets[set->index] = set;
		}
	}
}

void MDCommandBuffer::render_clear_attachments(VectorView<RDD::AttachmentClear> p_attachment_clears, VectorView<Rect2i> p_rects) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	const MDSubpass &subpass = render.get_subpass();

	uint32_t vertex_count = p_rects.size() * 6 * subpass.view_count;
	simd::float4 *vertices = ALLOCA_ARRAY(simd::float4, vertex_count);
	simd::float4 clear_colors[ClearAttKey::ATTACHMENT_COUNT];

	Size2i size = render.frameBuffer->size;
	Rect2i render_area = render.clip_to_render_area({ { 0, 0 }, size });
	size = Size2i(render_area.position.x + render_area.size.width, render_area.position.y + render_area.size.height);
	_populate_vertices(vertices, size, p_rects);

	ClearAttKey key;
	key.sample_count = render.pass->get_sample_count();
	if (subpass.view_count > 1) {
		key.enable_layered_rendering();
	}

	float depth_value = 0;
	uint32_t stencil_value = 0;

	for (uint32_t i = 0; i < p_attachment_clears.size(); i++) {
		RDD::AttachmentClear const &attClear = p_attachment_clears[i];
		uint32_t attachment_index;
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			attachment_index = attClear.color_attachment;
		} else {
			attachment_index = subpass.depth_stencil_reference.attachment;
		}

		MDAttachment const &mda = render.pass->attachments[attachment_index];
		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_COLOR_BIT)) {
			key.set_color_format(attachment_index, mda.format);
			clear_colors[attachment_index] = {
				attClear.value.color.r,
				attClear.value.color.g,
				attClear.value.color.b,
				attClear.value.color.a
			};
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT)) {
			key.set_depth_format(mda.format);
			depth_value = attClear.value.depth;
		}

		if (attClear.aspect.has_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT)) {
			key.set_stencil_format(mda.format);
			stencil_value = attClear.value.stencil;
		}
	}

	clear_colors[ClearAttKey::DEPTH_INDEX] = {
		depth_value,
		depth_value,
		depth_value,
		depth_value
	};

	id<MTLRenderCommandEncoder> enc = render.encoder;

	MDResourceCache &cache = device_driver->get_resource_cache();

	[enc pushDebugGroup:@"ClearAttachments"];
	[enc setRenderPipelineState:cache.get_clear_render_pipeline_state(key, nil)];
	[enc setDepthStencilState:cache.get_depth_stencil_state(
									  key.is_depth_enabled(),
									  key.is_stencil_enabled())];
	[enc setStencilReferenceValue:stencil_value];
	[enc setCullMode:MTLCullModeNone];
	[enc setTriangleFillMode:MTLTriangleFillModeFill];
	[enc setDepthBias:0 slopeScale:0 clamp:0];
	[enc setViewport:{ 0, 0, (double)size.width, (double)size.height, 0.0, 1.0 }];
	[enc setScissorRect:{ 0, 0, (NSUInteger)size.width, (NSUInteger)size.height }];

	[enc setVertexBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setFragmentBytes:clear_colors length:sizeof(clear_colors) atIndex:0];
	[enc setVertexBytes:vertices length:vertex_count * sizeof(vertices[0]) atIndex:device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX)];

	[enc drawPrimitives:MTLPrimitiveTypeTriangle vertexStart:0 vertexCount:vertex_count];
	[enc popDebugGroup];

	render.dirty.set_flag((RenderState::DirtyFlag)(RenderState::DIRTY_PIPELINE | RenderState::DIRTY_DEPTH | RenderState::DIRTY_RASTER));
	render.mark_uniforms_dirty({ 0 }); // Mark index 0 dirty, if there is already a binding for index 0.
	render.mark_viewport_dirty();
	render.mark_scissors_dirty();
	render.mark_vertex_dirty();
	render.mark_blend_dirty();
}
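
// NOTE: Metal can only clear an attachment via MTLLoadActionClear when a
// render pass begins, so the mid-pass clear above is emulated by drawing a
// pair of triangles per clear rect (and per layer) with a dedicated clear
// pipeline, an approach derived from MoltenVK. Every piece of raster state
// the clear touches is marked dirty afterwards, so the next draw call
// restores the application's own state.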

void MDCommandBuffer::_render_set_dirty_state() {
	_render_bind_uniform_sets();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		uint32_t view_range[2] = { 0, subpass.view_count };
		[render.encoder setVertexBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
		[render.encoder setFragmentBytes:view_range length:sizeof(view_range) atIndex:VIEW_MASK_BUFFER_INDEX];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_PIPELINE)) {
		[render.encoder setRenderPipelineState:render.pipeline->state];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VIEWPORT)) {
		[render.encoder setViewports:render.viewports.ptr() count:render.viewports.size()];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_DEPTH)) {
		[render.encoder setDepthStencilState:render.pipeline->depth_stencil];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_RASTER)) {
		render.pipeline->raster_state.apply(render.encoder);
	}

	if (render.dirty.has_flag(RenderState::DIRTY_SCISSOR) && !render.scissors.is_empty()) {
		size_t len = render.scissors.size();
		MTLScissorRect *rects = ALLOCA_ARRAY(MTLScissorRect, len);
		for (size_t i = 0; i < len; i++) {
			rects[i] = render.clip_to_render_area(render.scissors[i]);
		}
		[render.encoder setScissorRects:rects count:len];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_BLEND) && render.blend_constants.has_value()) {
		[render.encoder setBlendColorRed:render.blend_constants->r green:render.blend_constants->g blue:render.blend_constants->b alpha:render.blend_constants->a];
	}

	if (render.dirty.has_flag(RenderState::DIRTY_VERTEX)) {
		uint32_t p_binding_count = render.vertex_buffers.size();
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
								 offsets:render.vertex_offsets.ptr()
							   withRange:NSMakeRange(first, p_binding_count)];
	}

	render.dirty.clear();
}

void MDCommandBuffer::render_set_viewport(VectorView<Rect2i> p_viewports) {
	render.viewports.resize(p_viewports.size());
	for (uint32_t i = 0; i < p_viewports.size(); i += 1) {
		Rect2i const &vp = p_viewports[i];
		render.viewports[i] = {
			.originX = static_cast<double>(vp.position.x),
			.originY = static_cast<double>(vp.position.y),
			.width = static_cast<double>(vp.size.width),
			.height = static_cast<double>(vp.size.height),
			.znear = 0.0,
			.zfar = 1.0,
		};
	}
	render.dirty.set_flag(RenderState::DIRTY_VIEWPORT);
}

void MDCommandBuffer::render_set_scissor(VectorView<Rect2i> p_scissors) {
	render.scissors.resize(p_scissors.size());
	for (uint32_t i = 0; i < p_scissors.size(); i += 1) {
		Rect2i const &vp = p_scissors[i];
		render.scissors[i] = {
			.x = static_cast<NSUInteger>(vp.position.x),
			.y = static_cast<NSUInteger>(vp.position.y),
			.width = static_cast<NSUInteger>(vp.size.width),
			.height = static_cast<NSUInteger>(vp.size.height),
		};
	}
	render.dirty.set_flag(RenderState::DIRTY_SCISSOR);
}

void MDCommandBuffer::render_set_blend_constants(const Color &p_constants) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	if (render.blend_constants != p_constants) {
		render.blend_constants = p_constants;
		render.dirty.set_flag(RenderState::DIRTY_BLEND);
	}
}

void BoundUniformSet::merge_into(ResourceUsageMap &p_dst) const {
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : usage_to_resources) {
		ResourceVector *resources = p_dst.getptr(keyval.key);
		if (resources == nullptr) {
			resources = &p_dst.insert(keyval.key, ResourceVector())->value;
		}
		// Reserve space for the new resources, assuming they are all added.
		resources->reserve(resources->size() + keyval.value.size());

		uint32_t i = 0, j = 0;
		__unsafe_unretained id<MTLResource> *resources_ptr = resources->ptr();
		const __unsafe_unretained id<MTLResource> *keyval_ptr = keyval.value.ptr();
		// 2-way merge.
		while (i < resources->size() && j < keyval.value.size()) {
			if (resources_ptr[i] < keyval_ptr[j]) {
				i++;
			} else if (resources_ptr[i] > keyval_ptr[j]) {
				resources->insert(i, keyval_ptr[j]);
				i++;
				j++;
			} else {
				i++;
				j++;
			}
		}
		// Append the remaining resources.
		for (; j < keyval.value.size(); j++) {
			resources->push_back(keyval_ptr[j]);
		}
	}
}
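
// Example of the two-way merge above: assuming both vectors are kept sorted
// by resource pointer and duplicate-free, merging an incoming { A, C } into
// an existing { B, C } yields { A, B, C }. Deduplicating here keeps the
// useResources: batches submitted at end_encoding() from growing with
// repeated binds of the same resource.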

void MDCommandBuffer::_render_bind_uniform_sets() {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	if (!render.dirty.has_flag(RenderState::DIRTY_UNIFORMS)) {
		return;
	}

	render.dirty.clear_flag(RenderState::DIRTY_UNIFORMS);
	uint64_t set_uniforms = render.uniform_set_mask;
	render.uniform_set_mask = 0;

	MDRenderShader *shader = render.pipeline->shader;

	while (set_uniforms != 0) {
		// Find the index of the next set bit.
		int index = __builtin_ctzll(set_uniforms);
		// Clear the set bit.
		set_uniforms &= (set_uniforms - 1);
		MDUniformSet *set = render.uniform_sets[index];
		if (set == nullptr || set->index >= (uint32_t)shader->sets.size()) {
			continue;
		}
		set->bind_uniforms(shader, render);
	}
}

void MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, Size2i p_fb_size, VectorView<Rect2i> p_rects) {
	uint32_t idx = 0;
	for (uint32_t i = 0; i < p_rects.size(); i++) {
		Rect2i const &rect = p_rects[i];
		idx = _populate_vertices(p_vertices, idx, rect, p_fb_size);
	}
}

uint32_t MDCommandBuffer::_populate_vertices(simd::float4 *p_vertices, uint32_t p_index, Rect2i const &p_rect, Size2i p_fb_size) {
	// Determine the positions of the four edges of the
	// clear rectangle as a fraction of the attachment size.
	float leftPos = (float)(p_rect.position.x) / (float)p_fb_size.width;
	float rightPos = (float)(p_rect.size.width) / (float)p_fb_size.width + leftPos;
	float bottomPos = (float)(p_rect.position.y) / (float)p_fb_size.height;
	float topPos = (float)(p_rect.size.height) / (float)p_fb_size.height + bottomPos;

	// Transform to clip-space coordinates, which are bounded by (-1.0 < p < 1.0).
	leftPos = (leftPos * 2.0f) - 1.0f;
	rightPos = (rightPos * 2.0f) - 1.0f;
	bottomPos = (bottomPos * 2.0f) - 1.0f;
	topPos = (topPos * 2.0f) - 1.0f;

	simd::float4 vtx;

	uint32_t idx = p_index;
	uint32_t endLayer = render.get_subpass().view_count;

	for (uint32_t layer = 0; layer < endLayer; layer++) {
		vtx.z = 0.0;
		vtx.w = (float)layer;

		// Top left vertex - First triangle.
		vtx.y = topPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;

		// Bottom left vertex.
		vtx.y = bottomPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;

		// Bottom right vertex.
		vtx.y = bottomPos;
		vtx.x = rightPos;
		p_vertices[idx++] = vtx;

		// Bottom right vertex - Second triangle.
		p_vertices[idx++] = vtx;

		// Top right vertex.
		vtx.y = topPos;
		vtx.x = rightPos;
		p_vertices[idx++] = vtx;

		// Top left vertex.
		vtx.y = topPos;
		vtx.x = leftPos;
		p_vertices[idx++] = vtx;
	}

	return idx;
}
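
// NOTE: Each clear rect expands to six vertices (two triangles) per rendered
// layer, which matches the `p_rects.size() * 6 * subpass.view_count`
// allocation in render_clear_attachments(). The layer index is carried in
// vtx.w, which the layered clear pipeline presumably uses to route each
// triangle to the correct render target slice.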

void MDCommandBuffer::render_begin_pass(RDD::RenderPassID p_render_pass, RDD::FramebufferID p_frameBuffer, RDD::CommandBufferType p_cmd_buffer_type, const Rect2i &p_rect, VectorView<RDD::RenderPassClearValue> p_clear_values) {
	DEV_ASSERT(commandBuffer != nil);
	end();

	MDRenderPass *pass = (MDRenderPass *)(p_render_pass.id);
	MDFrameBuffer *fb = (MDFrameBuffer *)(p_frameBuffer.id);

	type = MDCommandBufferStateType::Render;
	render.pass = pass;
	render.current_subpass = UINT32_MAX;
	render.render_area = p_rect;
	render.clear_values.resize(p_clear_values.size());
	for (uint32_t i = 0; i < p_clear_values.size(); i++) {
		render.clear_values[i] = p_clear_values[i];
	}
	render.is_rendering_entire_area = (p_rect.position == Point2i(0, 0)) && p_rect.size == fb->size;
	render.frameBuffer = fb;
	render_next_subpass();
}

void MDCommandBuffer::_end_render_pass() {
	MDFrameBuffer const &fb_info = *render.frameBuffer;
	MDSubpass const &subpass = render.get_subpass();
	PixelFormats &pf = device_driver->get_pixel_formats();

	for (uint32_t i = 0; i < subpass.resolve_references.size(); i++) {
		uint32_t color_index = subpass.color_references[i].attachment;
		uint32_t resolve_index = subpass.resolve_references[i].attachment;
		DEV_ASSERT((color_index == RDD::AttachmentReference::UNUSED) == (resolve_index == RDD::AttachmentReference::UNUSED));
		if (color_index == RDD::AttachmentReference::UNUSED || !fb_info.has_texture(color_index)) {
			continue;
		}

		id<MTLTexture> resolve_tex = fb_info.get_texture(resolve_index);
		CRASH_COND_MSG(!flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve), "not implemented: unresolvable texture types");
		// see: https://github.com/KhronosGroup/MoltenVK/blob/d20d13fe2735adb845636a81522df1b9d89c0fba/MoltenVK/MoltenVK/GPUObjects/MVKRenderPass.mm#L407
	}

	render.end_encoding();
}

void MDCommandBuffer::_render_clear_render_area() {
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();

	// First determine attachments that should be cleared.
	LocalVector<RDD::AttachmentClear> clears;
	clears.reserve(subpass.color_references.size() + /* possible depth stencil clear */ 1);

	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx != RDD::AttachmentReference::UNUSED && pass.attachments[idx].shouldClear(subpass, false)) {
			clears.push_back({ .aspect = RDD::TEXTURE_ASPECT_COLOR_BIT, .color_attachment = idx, .value = render.clear_values[idx] });
		}
	}
	uint32_t ds_index = subpass.depth_stencil_reference.attachment;
	bool shouldClearDepth = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, false));
	bool shouldClearStencil = (ds_index != RDD::AttachmentReference::UNUSED && pass.attachments[ds_index].shouldClear(subpass, true));

	if (shouldClearDepth || shouldClearStencil) {
		MDAttachment const &attachment = pass.attachments[ds_index];
		BitField<RDD::TextureAspectBits> bits;
		if (shouldClearDepth && attachment.type & MDAttachmentType::Depth) {
			bits.set_flag(RDD::TEXTURE_ASPECT_DEPTH_BIT);
		}
		if (shouldClearStencil && attachment.type & MDAttachmentType::Stencil) {
			bits.set_flag(RDD::TEXTURE_ASPECT_STENCIL_BIT);
		}

		clears.push_back({ .aspect = bits, .color_attachment = ds_index, .value = render.clear_values[ds_index] });
	}

	if (clears.is_empty()) {
		return;
	}

	render_clear_attachments(clears, { render.render_area });
}

void MDCommandBuffer::render_next_subpass() {
	DEV_ASSERT(commandBuffer != nil);

	if (render.current_subpass == UINT32_MAX) {
		render.current_subpass = 0;
	} else {
		_end_render_pass();
		render.current_subpass++;
	}

	MDFrameBuffer const &fb = *render.frameBuffer;
	MDRenderPass const &pass = *render.pass;
	MDSubpass const &subpass = render.get_subpass();

	MTLRenderPassDescriptor *desc = MTLRenderPassDescriptor.renderPassDescriptor;

	if (subpass.view_count > 1) {
		desc.renderTargetArrayLength = subpass.view_count;
	}

	PixelFormats &pf = device_driver->get_pixel_formats();

	uint32_t attachmentCount = 0;
	for (uint32_t i = 0; i < subpass.color_references.size(); i++) {
		uint32_t idx = subpass.color_references[i].attachment;
		if (idx == RDD::AttachmentReference::UNUSED) {
			continue;
		}

		attachmentCount += 1;

		MTLRenderPassColorAttachmentDescriptor *ca = desc.colorAttachments[i];

		uint32_t resolveIdx = subpass.resolve_references.is_empty() ? RDD::AttachmentReference::UNUSED : subpass.resolve_references[i].attachment;
		bool has_resolve = resolveIdx != RDD::AttachmentReference::UNUSED;
		bool can_resolve = true;
		if (resolveIdx != RDD::AttachmentReference::UNUSED) {
			id<MTLTexture> resolve_tex = fb.get_texture(resolveIdx);
			can_resolve = flags::all(pf.getCapabilities(resolve_tex.pixelFormat), kMTLFmtCapsResolve);
			if (can_resolve) {
				ca.resolveTexture = resolve_tex;
			} else {
				CRASH_NOW_MSG("unimplemented: using a texture format that is not supported for resolve");
			}
		}

		MDAttachment const &attachment = pass.attachments[idx];

		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer color texture is null.");

		if ((attachment.type & MDAttachmentType::Color)) {
			if (attachment.configureDescriptor(ca, pf, subpass, tex, render.is_rendering_entire_area, has_resolve, can_resolve, false)) {
				Color clearColor = render.clear_values[idx].color;
				ca.clearColor = MTLClearColorMake(clearColor.r, clearColor.g, clearColor.b, clearColor.a);
			}
		}
	}

	if (subpass.depth_stencil_reference.attachment != RDD::AttachmentReference::UNUSED) {
		attachmentCount += 1;

		uint32_t idx = subpass.depth_stencil_reference.attachment;
		MDAttachment const &attachment = pass.attachments[idx];

		id<MTLTexture> tex = fb.get_texture(idx);
		ERR_FAIL_NULL_MSG(tex, "Frame buffer depth / stencil texture is null.");
		if (attachment.type & MDAttachmentType::Depth) {
			MTLRenderPassDepthAttachmentDescriptor *da = desc.depthAttachment;
			if (attachment.configureDescriptor(da, pf, subpass, tex, render.is_rendering_entire_area, false, false, false)) {
				da.clearDepth = render.clear_values[idx].depth;
			}
		}

		if (attachment.type & MDAttachmentType::Stencil) {
			MTLRenderPassStencilAttachmentDescriptor *sa = desc.stencilAttachment;
			if (attachment.configureDescriptor(sa, pf, subpass, tex, render.is_rendering_entire_area, false, false, true)) {
				sa.clearStencil = render.clear_values[idx].stencil;
			}
		}
	}

	desc.renderTargetWidth = MAX((NSUInteger)MIN(render.render_area.position.x + render.render_area.size.width, fb.size.width), 1u);
	desc.renderTargetHeight = MAX((NSUInteger)MIN(render.render_area.position.y + render.render_area.size.height, fb.size.height), 1u);

	if (attachmentCount == 0) {
		// If there are no attachments, delay the creation of the encoder,
		// so we can use a matching sample count for the pipeline, by setting
		// the defaultRasterSampleCount from the pipeline's sample count.
		render.desc = desc;
	} else {
		render.encoder = [commandBuffer renderCommandEncoderWithDescriptor:desc];

		if (!render.is_rendering_entire_area) {
			_render_clear_render_area();
		}
		// With a new encoder, all state is dirty.
		render.dirty.set_flag(RenderState::DIRTY_ALL);
	}
}

void MDCommandBuffer::render_draw(uint32_t p_vertex_count,
		uint32_t p_instance_count,
		uint32_t p_base_vertex,
		uint32_t p_first_instance) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		p_instance_count *= subpass.view_count;
	}

	DEV_ASSERT(render.dirty == 0);

	id<MTLRenderCommandEncoder> enc = render.encoder;

	[enc drawPrimitives:render.pipeline->raster_state.render_primitive
			vertexStart:p_base_vertex
			vertexCount:p_vertex_count
		  instanceCount:p_instance_count
		   baseInstance:p_first_instance];
}

void MDCommandBuffer::render_bind_vertex_buffers(uint32_t p_binding_count, const RDD::BufferID *p_buffers, const uint64_t *p_offsets) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.vertex_buffers.resize(p_binding_count);
	render.vertex_offsets.resize(p_binding_count);

	// Reverse the buffers, as their bindings are assigned in descending order.
	for (uint32_t i = 0; i < p_binding_count; i += 1) {
		render.vertex_buffers[i] = rid::get(p_buffers[p_binding_count - i - 1]);
		render.vertex_offsets[i] = p_offsets[p_binding_count - i - 1];
	}

	if (render.encoder) {
		uint32_t first = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(p_binding_count - 1);
		[render.encoder setVertexBuffers:render.vertex_buffers.ptr()
								 offsets:render.vertex_offsets.ptr()
							   withRange:NSMakeRange(first, p_binding_count)];
		render.dirty.clear_flag(RenderState::DIRTY_VERTEX);
	} else {
		render.dirty.set_flag(RenderState::DIRTY_VERTEX);
	}
}

void MDCommandBuffer::render_bind_index_buffer(RDD::BufferID p_buffer, RDD::IndexBufferFormat p_format, uint64_t p_offset) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.index_buffer = rid::get(p_buffer);
	render.index_type = p_format == RDD::IndexBufferFormat::INDEX_BUFFER_FORMAT_UINT16 ? MTLIndexTypeUInt16 : MTLIndexTypeUInt32;
	render.index_offset = p_offset;
}

void MDCommandBuffer::render_draw_indexed(uint32_t p_index_count,
		uint32_t p_instance_count,
		uint32_t p_first_index,
		int32_t p_vertex_offset,
		uint32_t p_first_instance) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	MDSubpass const &subpass = render.get_subpass();
	if (subpass.view_count > 1) {
		p_instance_count *= subpass.view_count;
	}

	id<MTLRenderCommandEncoder> enc = render.encoder;

	uint32_t index_offset = render.index_offset;
	index_offset += p_first_index * (render.index_type == MTLIndexTypeUInt16 ? sizeof(uint16_t) : sizeof(uint32_t));

	[enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
					indexCount:p_index_count
					 indexType:render.index_type
				   indexBuffer:render.index_buffer
			 indexBufferOffset:index_offset
				 instanceCount:p_instance_count
					baseVertex:p_vertex_offset
				  baseInstance:p_first_instance];
}

void MDCommandBuffer::render_draw_indexed_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	id<MTLRenderCommandEncoder> enc = render.encoder;

	id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
	NSUInteger indirect_offset = p_offset;

	for (uint32_t i = 0; i < p_draw_count; i++) {
		[enc drawIndexedPrimitives:render.pipeline->raster_state.render_primitive
						 indexType:render.index_type
					   indexBuffer:render.index_buffer
				 indexBufferOffset:0
					indirectBuffer:indirect_buffer
			  indirectBufferOffset:indirect_offset];
		indirect_offset += p_stride;
	}
}

void MDCommandBuffer::render_draw_indexed_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}

void MDCommandBuffer::render_draw_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset, uint32_t p_draw_count, uint32_t p_stride) {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);
	ERR_FAIL_NULL_MSG(render.pipeline, "No pipeline set for render command buffer.");

	_render_set_dirty_state();

	id<MTLRenderCommandEncoder> enc = render.encoder;

	id<MTLBuffer> indirect_buffer = rid::get(p_indirect_buffer);
	NSUInteger indirect_offset = p_offset;

	for (uint32_t i = 0; i < p_draw_count; i++) {
		[enc drawPrimitives:render.pipeline->raster_state.render_primitive
					 indirectBuffer:indirect_buffer
			   indirectBufferOffset:indirect_offset];
		indirect_offset += p_stride;
	}
}

void MDCommandBuffer::render_draw_indirect_count(RDD::BufferID p_indirect_buffer, uint64_t p_offset, RDD::BufferID p_count_buffer, uint64_t p_count_buffer_offset, uint32_t p_max_draw_count, uint32_t p_stride) {
	ERR_FAIL_MSG("not implemented");
}
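
// NOTE: Metal has no multi-draw indirect command, so the indirect draw
// methods above encode p_draw_count individual draws, advancing the indirect
// buffer offset by p_stride after each one. The *_count variants would need
// the draw count to be read back from a GPU buffer, which is presumably why
// they are left unimplemented.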

void MDCommandBuffer::render_end_pass() {
	DEV_ASSERT(type == MDCommandBufferStateType::Render);

	render.end_encoding();
	render.reset();
	type = MDCommandBufferStateType::None;
}

#pragma mark - RenderState

void MDCommandBuffer::RenderState::reset() {
	pass = nil;
	frameBuffer = nil;
	pipeline = nil;
	current_subpass = UINT32_MAX;
	render_area = {};
	is_rendering_entire_area = false;
	desc = nil;
	encoder = nil;
	index_buffer = nil;
	index_type = MTLIndexTypeUInt16;
	dirty = DIRTY_NONE;
	uniform_sets.clear();
	uniform_set_mask = 0;
	clear_values.clear();
	viewports.clear();
	scissors.clear();
	blend_constants.reset();
	vertex_buffers.clear();
	vertex_offsets.clear();
	// Keep the keys, as they are likely to be used again.
	for (KeyValue<StageResourceUsage, LocalVector<__unsafe_unretained id<MTLResource>>> &kv : resource_usage) {
		kv.value.clear();
	}
}

void MDCommandBuffer::RenderState::end_encoding() {
	if (encoder == nil) {
		return;
	}

	// Bind all resources.
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
		if (keyval.value.is_empty()) {
			continue;
		}

		MTLResourceUsage vert_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_VERTEX);
		MTLResourceUsage frag_usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_FRAGMENT);
		if (vert_usage == frag_usage) {
			[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex | MTLRenderStageFragment];
		} else {
			if (vert_usage != 0) {
				[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:vert_usage stages:MTLRenderStageVertex];
			}
			if (frag_usage != 0) {
				[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:frag_usage stages:MTLRenderStageFragment];
			}
		}
	}

	[encoder endEncoding];
	encoder = nil;
}
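
// NOTE: Resources referenced indirectly (e.g. through argument buffers) are
// not automatically made resident, so end_encoding() batches one
// useResources:count:usage:stages: call per accumulated usage/stage
// combination before closing the encoder.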

#pragma mark - ComputeState

void MDCommandBuffer::ComputeState::end_encoding() {
	if (encoder == nil) {
		return;
	}

	// Bind all resources.
	for (KeyValue<StageResourceUsage, ResourceVector> const &keyval : resource_usage) {
		if (keyval.value.is_empty()) {
			continue;
		}

		MTLResourceUsage usage = resource_usage_for_stage(keyval.key, RDD::ShaderStage::SHADER_STAGE_COMPUTE);
		if (usage != 0) {
			[encoder useResources:keyval.value.ptr() count:keyval.value.size() usage:usage];
		}
	}

	[encoder endEncoding];
	encoder = nil;
}

#pragma mark - Compute

void MDCommandBuffer::compute_bind_uniform_set(RDD::UniformSetID p_uniform_set, RDD::ShaderID p_shader, uint32_t p_set_index) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MDShader *shader = (MDShader *)(p_shader.id);
	MDUniformSet *set = (MDUniformSet *)(p_uniform_set.id);
	set->bind_uniforms(shader, compute);
}

void MDCommandBuffer::compute_bind_uniform_sets(VectorView<RDD::UniformSetID> p_uniform_sets, RDD::ShaderID p_shader, uint32_t p_first_set_index, uint32_t p_set_count) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MDShader *shader = (MDShader *)(p_shader.id);

	// TODO(sgc): Bind multiple buffers using [encoder setBuffers:offsets:withRange:]
	for (size_t i = 0u; i < p_set_count; ++i) {
		MDUniformSet *set = (MDUniformSet *)(p_uniform_sets[i].id);
		set->bind_uniforms(shader, compute);
	}
}

void MDCommandBuffer::compute_dispatch(uint32_t p_x_groups, uint32_t p_y_groups, uint32_t p_z_groups) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	MTLRegion region = MTLRegionMake3D(0, 0, 0, p_x_groups, p_y_groups, p_z_groups);

	id<MTLComputeCommandEncoder> enc = compute.encoder;
	[enc dispatchThreadgroups:region.size threadsPerThreadgroup:compute.pipeline->compute_state.local];
}

void MDCommandBuffer::compute_dispatch_indirect(RDD::BufferID p_indirect_buffer, uint64_t p_offset) {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	id<MTLBuffer> indirectBuffer = rid::get(p_indirect_buffer);

	id<MTLComputeCommandEncoder> enc = compute.encoder;
	[enc dispatchThreadgroupsWithIndirectBuffer:indirectBuffer indirectBufferOffset:p_offset threadsPerThreadgroup:compute.pipeline->compute_state.local];
}

void MDCommandBuffer::_end_compute_dispatch() {
	DEV_ASSERT(type == MDCommandBufferStateType::Compute);

	compute.end_encoding();
	compute.reset();
	type = MDCommandBufferStateType::None;
}

void MDCommandBuffer::_end_blit() {
	DEV_ASSERT(type == MDCommandBufferStateType::Blit);

	[blit.encoder endEncoding];
	blit.reset();
	type = MDCommandBufferStateType::None;
}

MDComputeShader::MDComputeShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_uses_argument_buffers,
		MDLibrary *p_kernel) :
		MDShader(p_name, p_sets, p_uses_argument_buffers), kernel(p_kernel) {
}

void MDComputeShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
	DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Compute);
	if (push_constants.binding == (uint32_t)-1) {
		return;
	}

	id<MTLComputeCommandEncoder> enc = p_cb->compute.encoder;

	void const *ptr = p_data.ptr();
	size_t length = p_data.size() * sizeof(uint32_t);

	[enc setBytes:ptr length:length atIndex:push_constants.binding];
}

MDRenderShader::MDRenderShader(CharString p_name,
		Vector<UniformSet> p_sets,
		bool p_needs_view_mask_buffer,
		bool p_uses_argument_buffers,
		MDLibrary *_Nonnull p_vert, MDLibrary *_Nonnull p_frag) :
		MDShader(p_name, p_sets, p_uses_argument_buffers),
		needs_view_mask_buffer(p_needs_view_mask_buffer),
		vert(p_vert),
		frag(p_frag) {
}

void MDRenderShader::encode_push_constant_data(VectorView<uint32_t> p_data, MDCommandBuffer *p_cb) {
	DEV_ASSERT(p_cb->type == MDCommandBufferStateType::Render);
	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_cb->render.encoder;

	void const *ptr = p_data.ptr();
	size_t length = p_data.size() * sizeof(uint32_t);

	if (push_constants.vert.binding > -1) {
		[enc setVertexBytes:ptr length:length atIndex:push_constants.vert.binding];
	}

	if (push_constants.frag.binding > -1) {
		[enc setFragmentBytes:ptr length:length atIndex:push_constants.frag.binding];
	}
}
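
// NOTE: Metal has no native push constants. Both shader types above emulate
// them by copying the data inline with set*Bytes: at the binding index
// recorded on the shader; a binding of -1 indicates the stage does not use
// push constants.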

void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::RenderState &p_state) {
	DEV_ASSERT(p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	UniformSet const &set_info = p_shader->sets[index];

	id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
	id<MTLDevice> __unsafe_unretained device = enc.device;

	BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage);

	// Set the buffer for the vertex stage.
	{
		uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_VERTEX);
		if (offset) {
			[enc setVertexBuffer:bus.buffer offset:*offset atIndex:index];
		}
	}

	// Set the buffer for the fragment stage.
	{
		uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_FRAGMENT);
		if (offset) {
			[enc setFragmentBuffer:bus.buffer offset:*offset atIndex:index];
		}
	}
}
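
// NOTE: With argument buffers, the entire uniform set is encoded into a
// single MTLBuffer (obtained via bound_uniform_set() above), so binding the
// set costs one setVertexBuffer/setFragmentBuffer call per stage at that
// stage's offset into the buffer. The direct path below instead issues a
// separate bind call per uniform and per stage.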
  897. void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::RenderState &p_state) {
  898. DEV_ASSERT(!p_shader->uses_argument_buffers);
  899. DEV_ASSERT(p_state.encoder != nil);
  900. id<MTLRenderCommandEncoder> __unsafe_unretained enc = p_state.encoder;
  901. UniformSet const &set = p_shader->sets[index];
  902. for (uint32_t i = 0; i < MIN(uniforms.size(), set.uniforms.size()); i++) {
  903. RDD::BoundUniform const &uniform = uniforms[i];
  904. UniformInfo ui = set.uniforms[i];
  905. static const RDC::ShaderStage stage_usages[2] = { RDC::ShaderStage::SHADER_STAGE_VERTEX, RDC::ShaderStage::SHADER_STAGE_FRAGMENT };
  906. for (const RDC::ShaderStage stage : stage_usages) {
  907. ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
  908. BindingInfo *bi = ui.bindings.getptr(stage);
  909. if (bi == nullptr) {
  910. // No binding for this stage.
  911. continue;
  912. }
  913. if ((ui.active_stages & stage_usage) == 0) {
  914. // Not active for this state, so don't bind anything.
  915. continue;
  916. }
  917. switch (uniform.type) {
  918. case RDD::UNIFORM_TYPE_SAMPLER: {
  919. size_t count = uniform.ids.size();
  920. id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
  921. for (size_t j = 0; j < count; j += 1) {
  922. objects[j] = rid::get(uniform.ids[j].id);
  923. }
  924. if (stage == RDD::SHADER_STAGE_VERTEX) {
  925. [enc setVertexSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
  926. } else {
  927. [enc setFragmentSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
  928. }
  929. } break;
  930. case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
  931. size_t count = uniform.ids.size() / 2;
  932. id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
  933. id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
  934. for (uint32_t j = 0; j < count; j += 1) {
  935. id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
  936. id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
						samplers[j] = sampler;
						textures[j] = texture;
					}
					BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						} else {
							[enc setFragmentSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
					}
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexTextures:textures withRange:NSMakeRange(bi->index, count)];
					} else {
						[enc setFragmentTextures:textures withRange:NSMakeRange(bi->index, count)];
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_IMAGE: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
						BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
							id<MTLBuffer> buf = tex.buffer;
							if (buf) {
								if (stage == RDD::SHADER_STAGE_VERTEX) {
									[enc setVertexBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								} else {
									[enc setFragmentBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
					ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
					CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
				} break;
				case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
					id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
					if (stage == RDD::SHADER_STAGE_VERTEX) {
						[enc setVertexBuffer:buffer offset:0 atIndex:bi->index];
					} else {
						[enc setFragmentBuffer:buffer offset:0 atIndex:bi->index];
					}
				} break;
				case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
					size_t count = uniform.ids.size();
					if (count == 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[0]);
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTexture:obj atIndex:bi->index];
						} else {
							[enc setFragmentTexture:obj atIndex:bi->index];
						}
					} else {
						id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[j]);
							objects[j] = obj;
						}
						if (stage == RDD::SHADER_STAGE_VERTEX) {
							[enc setVertexTextures:objects withRange:NSMakeRange(bi->index, count)];
						} else {
							[enc setFragmentTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					}
				} break;
				default: {
					DEV_ASSERT(false);
				}
			}
		}
	}
}
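
// Binds this uniform set for rendering, selecting between the argument-buffer
// and direct binding paths based on how the shader was compiled.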
void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::RenderState &p_state) {
	if (p_shader->uses_argument_buffers) {
		bind_uniforms_argument_buffers(p_shader, p_state);
	} else {
		bind_uniforms_direct(p_shader, p_state);
	}
}
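
// Argument-buffer path for compute: the set's resources were encoded into a
// Metal argument buffer (see bound_uniform_set), so binding reduces to a
// single setBuffer call at the set's index.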
void MDUniformSet::bind_uniforms_argument_buffers(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state) {
	DEV_ASSERT(p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	UniformSet const &set_info = p_shader->sets[index];

	id<MTLComputeCommandEncoder> enc = p_state.encoder;
	id<MTLDevice> device = enc.device;

	BoundUniformSet &bus = bound_uniform_set(p_shader, device, p_state.resource_usage);

	uint32_t const *offset = set_info.offsets.getptr(RDD::SHADER_STAGE_COMPUTE);
	if (offset) {
		[enc setBuffer:bus.buffer offset:*offset atIndex:index];
	}
}
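
// Direct path for compute: each resource in the set is bound individually on
// the compute encoder, using the per-stage binding indices from shader
// reflection.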
void MDUniformSet::bind_uniforms_direct(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state) {
	DEV_ASSERT(!p_shader->uses_argument_buffers);
	DEV_ASSERT(p_state.encoder != nil);

	id<MTLComputeCommandEncoder> __unsafe_unretained enc = p_state.encoder;

	UniformSet const &set = p_shader->sets[index];

	for (uint32_t i = 0; i < uniforms.size(); i++) {
		RDD::BoundUniform const &uniform = uniforms[i];
		UniformInfo ui = set.uniforms[i];

		const RDC::ShaderStage stage = RDC::ShaderStage::SHADER_STAGE_COMPUTE;
		const ShaderStageUsage stage_usage = ShaderStageUsage(1 << stage);

		BindingInfo *bi = ui.bindings.getptr(stage);
		if (bi == nullptr) {
			// No binding for this stage.
			continue;
		}

		if ((ui.active_stages & stage_usage) == 0) {
			// Not active for this stage, so don't bind anything.
			continue;
		}

		switch (uniform.type) {
			case RDD::UNIFORM_TYPE_SAMPLER: {
				size_t count = uniform.ids.size();
				id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (size_t j = 0; j < count; j += 1) {
					objects[j] = rid::get(uniform.ids[j].id);
				}
				[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
				size_t count = uniform.ids.size() / 2;
				id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
				id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
				for (uint32_t j = 0; j < count; j += 1) {
					id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
					id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
					samplers[j] = sampler;
					textures[j] = texture;
				}
				BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
				if (sbi) {
					[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
				}
				[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_IMAGE: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
					BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
					if (sbi) {
						id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
						id<MTLBuffer> buf = tex.buffer;
						if (buf) {
							[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
						}
					}
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
				ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
				CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
			} break;
			case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
				id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
				[enc setBuffer:buffer offset:0 atIndex:bi->index];
			} break;
			case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
				size_t count = uniform.ids.size();
				if (count == 1) {
					id<MTLTexture> obj = rid::get(uniform.ids[0]);
					[enc setTexture:obj atIndex:bi->index];
				} else {
					id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
					for (size_t j = 0; j < count; j += 1) {
						id<MTLTexture> obj = rid::get(uniform.ids[j]);
						objects[j] = obj;
					}
					[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
				}
			} break;
			default: {
				DEV_ASSERT(false);
			}
		}
	}
}

void MDUniformSet::bind_uniforms(MDShader *p_shader, MDCommandBuffer::ComputeState &p_state) {
	if (p_shader->uses_argument_buffers) {
		bind_uniforms_argument_buffers(p_shader, p_state);
	} else {
		bind_uniforms_direct(p_shader, p_state);
	}
}
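
// Returns (creating and caching on first use) the argument buffer that encodes
// this uniform set for p_shader, and merges the set's resource usage into
// p_resource_usage so callers can make the resources resident.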
BoundUniformSet &MDUniformSet::bound_uniform_set(MDShader *p_shader, id<MTLDevice> p_device, ResourceUsageMap &p_resource_usage) {
	BoundUniformSet *sus = bound_uniforms.getptr(p_shader);
	if (sus != nullptr) {
		sus->merge_into(p_resource_usage);
		return *sus;
	}

	UniformSet const &set = p_shader->sets[index];

	HashMap<id<MTLResource>, StageResourceUsage> bound_resources;
	auto add_usage = [&bound_resources](id<MTLResource> __unsafe_unretained res, RDD::ShaderStage stage, MTLResourceUsage usage) {
		StageResourceUsage *sru = bound_resources.getptr(res);
		if (sru == nullptr) {
			bound_resources.insert(res, stage_resource_usage(stage, usage));
		} else {
			*sru |= stage_resource_usage(stage, usage);
		}
	};

	id<MTLBuffer> enc_buffer = nil;
	if (set.buffer_size > 0) {
		MTLResourceOptions options = MTLResourceStorageModeShared | MTLResourceHazardTrackingModeTracked;
		enc_buffer = [p_device newBufferWithLength:set.buffer_size options:options];
		for (KeyValue<RDC::ShaderStage, id<MTLArgumentEncoder>> const &kv : set.encoders) {
			RDD::ShaderStage const stage = kv.key;
			ShaderStageUsage const stage_usage = ShaderStageUsage(1 << stage);
			id<MTLArgumentEncoder> const enc = kv.value;

			[enc setArgumentBuffer:enc_buffer offset:set.offsets[stage]];

			for (uint32_t i = 0; i < uniforms.size(); i++) {
				RDD::BoundUniform const &uniform = uniforms[i];
				UniformInfo ui = set.uniforms[i];

				BindingInfo *bi = ui.bindings.getptr(stage);
				if (bi == nullptr) {
					// No binding for this stage.
					continue;
				}

				if ((ui.active_stages & stage_usage) == 0) {
					// Not active for this stage, so don't bind anything.
					continue;
				}

				switch (uniform.type) {
					case RDD::UNIFORM_TYPE_SAMPLER: {
						size_t count = uniform.ids.size();
						id<MTLSamplerState> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (size_t j = 0; j < count; j += 1) {
							objects[j] = rid::get(uniform.ids[j].id);
						}
						[enc setSamplerStates:objects withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE: {
						size_t count = uniform.ids.size() / 2;
						id<MTLTexture> __unsafe_unretained *textures = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
						id<MTLSamplerState> __unsafe_unretained *samplers = ALLOCA_ARRAY(id<MTLSamplerState> __unsafe_unretained, count);
						for (uint32_t j = 0; j < count; j += 1) {
							id<MTLSamplerState> sampler = rid::get(uniform.ids[j * 2 + 0]);
							id<MTLTexture> texture = rid::get(uniform.ids[j * 2 + 1]);
							samplers[j] = sampler;
							textures[j] = texture;
							add_usage(texture, stage, bi->usage);
						}
						BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
						if (sbi) {
							[enc setSamplerStates:samplers withRange:NSMakeRange(sbi->index, count)];
						}
						[enc setTextures:textures withRange:NSMakeRange(bi->index, count)];
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_IMAGE: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
							BindingInfo *sbi = ui.bindings_secondary.getptr(stage);
							if (sbi) {
								id<MTLTexture> tex = obj.parentTexture ? obj.parentTexture : obj;
								id<MTLBuffer> buf = tex.buffer;
								if (buf) {
									[enc setBuffer:buf offset:tex.bufferOffset atIndex:sbi->index];
								}
							}
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					case RDD::UNIFORM_TYPE_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER: {
						ERR_PRINT("not implemented: UNIFORM_TYPE_SAMPLER_WITH_TEXTURE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_IMAGE_BUFFER: {
						CRASH_NOW_MSG("not implemented: UNIFORM_TYPE_IMAGE_BUFFER");
					} break;
					case RDD::UNIFORM_TYPE_UNIFORM_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_STORAGE_BUFFER: {
						id<MTLBuffer> buffer = rid::get(uniform.ids[0]);
						[enc setBuffer:buffer offset:0 atIndex:bi->index];
						add_usage(buffer, stage, bi->usage);
					} break;
					case RDD::UNIFORM_TYPE_INPUT_ATTACHMENT: {
						size_t count = uniform.ids.size();
						if (count == 1) {
							id<MTLTexture> obj = rid::get(uniform.ids[0]);
							[enc setTexture:obj atIndex:bi->index];
							add_usage(obj, stage, bi->usage);
						} else {
							id<MTLTexture> __unsafe_unretained *objects = ALLOCA_ARRAY(id<MTLTexture> __unsafe_unretained, count);
							for (size_t j = 0; j < count; j += 1) {
								id<MTLTexture> obj = rid::get(uniform.ids[j]);
								objects[j] = obj;
								add_usage(obj, stage, bi->usage);
							}
							[enc setTextures:objects withRange:NSMakeRange(bi->index, count)];
						}
					} break;
					default: {
						DEV_ASSERT(false);
					}
				}
			}
		}
	}

	SearchArray<__unsafe_unretained id<MTLResource>> search;
	ResourceUsageMap usage_to_resources;
	for (KeyValue<id<MTLResource>, StageResourceUsage> const &keyval : bound_resources) {
		ResourceVector *resources = usage_to_resources.getptr(keyval.value);
		if (resources == nullptr) {
			resources = &usage_to_resources.insert(keyval.value, ResourceVector())->value;
		}
		int64_t pos = search.bisect(resources->ptr(), resources->size(), keyval.key, true);
		if (pos == resources->size() || (*resources)[pos] != keyval.key) {
			resources->insert(pos, keyval.key);
		}
	}

	BoundUniformSet bs = { .buffer = enc_buffer, .usage_to_resources = usage_to_resources };
	bound_uniforms.insert(p_shader, bs);

	bs.merge_into(p_resource_usage);

	return bound_uniforms.get(p_shader);
}
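
// Accumulates the format capabilities an attachment needs for this subpass,
// based on whether it is used as an input, color, resolve, or depth/stencil
// attachment.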
MTLFmtCaps MDSubpass::getRequiredFmtCapsForAttachmentAt(uint32_t p_index) const {
	MTLFmtCaps caps = kMTLFmtCapsNone;

	for (RDD::AttachmentReference const &ar : input_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsRead);
			break;
		}
	}

	for (RDD::AttachmentReference const &ar : color_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsColorAtt);
			break;
		}
	}

	for (RDD::AttachmentReference const &ar : resolve_references) {
		if (ar.attachment == p_index) {
			flags::set(caps, kMTLFmtCapsResolve);
			break;
		}
	}

	if (depth_stencil_reference.attachment == p_index) {
		flags::set(caps, kMTLFmtCapsDSAtt);
	}

	return caps;
}
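
// Records the first and last subpasses that use this attachment, which later
// drives load and store action selection.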
void MDAttachment::linkToSubpass(const MDRenderPass &p_pass) {
	firstUseSubpassIndex = UINT32_MAX;
	lastUseSubpassIndex = 0;

	for (MDSubpass const &subpass : p_pass.subpasses) {
		MTLFmtCaps reqCaps = subpass.getRequiredFmtCapsForAttachmentAt(index);
		if (reqCaps) {
			firstUseSubpassIndex = MIN(subpass.subpass_index, firstUseSubpassIndex);
			lastUseSubpassIndex = MAX(subpass.subpass_index, lastUseSubpassIndex);
		}
	}
}
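
// Chooses the Metal store action. If the render area is partial or a later
// subpass still uses the attachment, the contents must be stored; otherwise
// the configured action applies, folding in a multisample resolve when one is
// requested and supported.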
MTLStoreAction MDAttachment::getMTLStoreAction(MDSubpass const &p_subpass,
		bool p_is_rendering_entire_area,
		bool p_has_resolve,
		bool p_can_resolve,
		bool p_is_stencil) const {
	if (!p_is_rendering_entire_area || !isLastUseOf(p_subpass)) {
		return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
	}

	switch (p_is_stencil ? stencilStoreAction : storeAction) {
		case MTLStoreActionStore:
			return p_has_resolve && p_can_resolve ? MTLStoreActionStoreAndMultisampleResolve : MTLStoreActionStore;
		case MTLStoreActionDontCare:
			return p_has_resolve ? (p_can_resolve ? MTLStoreActionMultisampleResolve : MTLStoreActionStore) : MTLStoreActionDontCare;
		default:
			return MTLStoreActionStore;
	}
}
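
// Fills in a render pass attachment descriptor for this attachment and
// subpass. Returns true if the resolved load action is a clear, so the caller
// knows a clear value is required.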
bool MDAttachment::configureDescriptor(MTLRenderPassAttachmentDescriptor *p_desc,
		PixelFormats &p_pf,
		MDSubpass const &p_subpass,
		id<MTLTexture> p_attachment,
		bool p_is_rendering_entire_area,
		bool p_has_resolve,
		bool p_can_resolve,
		bool p_is_stencil) const {
	p_desc.texture = p_attachment;

	MTLLoadAction load;
	if (!p_is_rendering_entire_area || !isFirstUseOf(p_subpass)) {
		load = MTLLoadActionLoad;
	} else {
		load = p_is_stencil ? stencilLoadAction : loadAction;
	}
	p_desc.loadAction = load;

	MTLPixelFormat mtlFmt = p_attachment.pixelFormat;
	bool isDepthFormat = p_pf.isDepthFormat(mtlFmt);
	bool isStencilFormat = p_pf.isStencilFormat(mtlFmt);
	if (isStencilFormat && !p_is_stencil && !isDepthFormat) {
		p_desc.storeAction = MTLStoreActionDontCare;
	} else {
		p_desc.storeAction = getMTLStoreAction(p_subpass, p_is_rendering_entire_area, p_has_resolve, p_can_resolve, p_is_stencil);
	}

	return load == MTLLoadActionClear;
}

bool MDAttachment::shouldClear(const MDSubpass &p_subpass, bool p_is_stencil) const {
	// If the subpass is not the first subpass to use this attachment, don't clear this attachment.
	if (p_subpass.subpass_index != firstUseSubpassIndex) {
		return false;
	}
	return (p_is_stencil ? stencilLoadAction : loadAction) == MTLLoadActionClear;
}

MDRenderPass::MDRenderPass(Vector<MDAttachment> &p_attachments, Vector<MDSubpass> &p_subpasses) :
		attachments(p_attachments), subpasses(p_subpasses) {
	for (MDAttachment &att : attachments) {
		att.linkToSubpass(*this);
	}
}

#pragma mark - Resource Factory
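
// Compiles p_source as a Metal library and returns the named entry point,
// reporting any compile error through p_error when provided.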
id<MTLFunction> MDResourceFactory::new_func(NSString *p_source, NSString *p_name, NSError **p_error) {
	@autoreleasepool {
		NSError *err = nil;
		MTLCompileOptions *options = [MTLCompileOptions new];
		id<MTLDevice> device = device_driver->get_device();
		id<MTLLibrary> mtlLib = [device newLibraryWithSource:p_source options:options error:&err];
		if (err) {
			if (p_error != nil) {
				*p_error = err;
			}
		}
		return [mtlLib newFunctionWithName:p_name];
	}
}
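
// Generates the vertex function used to clear render attachments. The MSL
// source is specialized for layered rendering, and the clear depth is passed
// through the position's z component.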
id<MTLFunction> MDResourceFactory::new_clear_vert_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSString *msl = [NSString stringWithFormat:@R"(
#include <metal_stdlib>
using namespace metal;

typedef struct {
	float4 a_position [[attribute(0)]];
} AttributesPos;

typedef struct {
	float4 colors[9];
} ClearColorsIn;

typedef struct {
	float4 v_position [[position]];
	uint layer%s;
} VaryingsPos;

vertex VaryingsPos vertClear(AttributesPos attributes [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
	VaryingsPos varyings;
	varyings.v_position = float4(attributes.a_position.x, -attributes.a_position.y, ccIn.colors[%d].r, 1.0);
	varyings.layer = uint(attributes.a_position.w);
	return varyings;
}
)", p_key.is_layered_rendering_enabled() ? " [[render_target_array_index]]" : "", ClearAttKey::DEPTH_INDEX];

		return new_func(msl, @"vertClear", nil);
	}
}
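
// Generates the matching fragment function, emitting one color output per
// enabled attachment with a type derived from the attachment's pixel format.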
id<MTLFunction> MDResourceFactory::new_clear_frag_func(ClearAttKey &p_key) {
	@autoreleasepool {
		NSMutableString *msl = [NSMutableString stringWithCapacity:2048];

		[msl appendFormat:@R"(
#include <metal_stdlib>
using namespace metal;

typedef struct {
	float4 v_position [[position]];
} VaryingsPos;

typedef struct {
	float4 colors[9];
} ClearColorsIn;

typedef struct {
)"];

		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" %@4 color%u [[color(%u)]];\n", typeStr, caIdx, caIdx];
			}
		}

		[msl appendFormat:@R"(} ClearColorsOut;

fragment ClearColorsOut fragClear(VaryingsPos varyings [[stage_in]], constant ClearColorsIn& ccIn [[buffer(0)]]) {
	ClearColorsOut ccOut;
)"];

		for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
			if (p_key.is_enabled(caIdx)) {
				NSString *typeStr = get_format_type_string((MTLPixelFormat)p_key.pixel_formats[caIdx]);
				[msl appendFormat:@" ccOut.color%u = %@4(ccIn.colors[%u]);\n", caIdx, typeStr, caIdx];
			}
		}

		[msl appendString:@R"( return ccOut;
})"];

		return new_func(msl, @"fragClear", nil);
	}
}

NSString *MDResourceFactory::get_format_type_string(MTLPixelFormat p_fmt) {
	switch (device_driver->get_pixel_formats().getFormatType(p_fmt)) {
		case MTLFormatType::ColorInt8:
		case MTLFormatType::ColorInt16:
			return @"short";
		case MTLFormatType::ColorUInt8:
		case MTLFormatType::ColorUInt16:
			return @"ushort";
		case MTLFormatType::ColorInt32:
			return @"int";
		case MTLFormatType::ColorUInt32:
			return @"uint";
		case MTLFormatType::ColorHalf:
			return @"half";
		case MTLFormatType::ColorFloat:
		case MTLFormatType::DepthStencil:
		case MTLFormatType::Compressed:
			return @"float";
		case MTLFormatType::None:
			return @"unexpected_MTLPixelFormatInvalid";
	}
}
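
// Builds a depth/stencil state for attachment clears: always-passing compare
// functions, with writes (and stencil replace operations) enabled only for the
// requested aspects.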
id<MTLDepthStencilState> MDResourceFactory::new_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
	MTLDepthStencilDescriptor *dsDesc = [MTLDepthStencilDescriptor new];
	dsDesc.depthCompareFunction = MTLCompareFunctionAlways;
	dsDesc.depthWriteEnabled = p_use_depth;

	if (p_use_stencil) {
		MTLStencilDescriptor *sDesc = [MTLStencilDescriptor new];
		sDesc.stencilCompareFunction = MTLCompareFunctionAlways;
		sDesc.stencilFailureOperation = MTLStencilOperationReplace;
		sDesc.depthFailureOperation = MTLStencilOperationReplace;
		sDesc.depthStencilPassOperation = MTLStencilOperationReplace;
		dsDesc.frontFaceStencil = sDesc;
		dsDesc.backFaceStencil = sDesc;
	} else {
		dsDesc.frontFaceStencil = nil;
		dsDesc.backFaceStencil = nil;
	}

	return [device_driver->get_device() newDepthStencilStateWithDescriptor:dsDesc];
}
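
// Assembles the full clear pipeline: the generated vertex/fragment functions,
// one color attachment descriptor per slot (write-masked when disabled),
// optional depth/stencil formats, and a single float4 per-vertex position
// attribute.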
id<MTLRenderPipelineState> MDResourceFactory::new_clear_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
	PixelFormats &pixFmts = device_driver->get_pixel_formats();

	id<MTLFunction> vtxFunc = new_clear_vert_func(p_key);
	id<MTLFunction> fragFunc = new_clear_frag_func(p_key);
	MTLRenderPipelineDescriptor *plDesc = [MTLRenderPipelineDescriptor new];
	plDesc.label = @"ClearRenderAttachments";
	plDesc.vertexFunction = vtxFunc;
	plDesc.fragmentFunction = fragFunc;
	plDesc.rasterSampleCount = p_key.sample_count;
	plDesc.inputPrimitiveTopology = MTLPrimitiveTopologyClassTriangle;

	for (uint32_t caIdx = 0; caIdx < ClearAttKey::COLOR_COUNT; caIdx++) {
		MTLRenderPipelineColorAttachmentDescriptor *colorDesc = plDesc.colorAttachments[caIdx];
		colorDesc.pixelFormat = (MTLPixelFormat)p_key.pixel_formats[caIdx];
		colorDesc.writeMask = p_key.is_enabled(caIdx) ? MTLColorWriteMaskAll : MTLColorWriteMaskNone;
	}

	MTLPixelFormat mtlDepthFormat = p_key.depth_format();
	if (pixFmts.isDepthFormat(mtlDepthFormat)) {
		plDesc.depthAttachmentPixelFormat = mtlDepthFormat;
	}

	MTLPixelFormat mtlStencilFormat = p_key.stencil_format();
	if (pixFmts.isStencilFormat(mtlStencilFormat)) {
		plDesc.stencilAttachmentPixelFormat = mtlStencilFormat;
	}

	MTLVertexDescriptor *vtxDesc = plDesc.vertexDescriptor;

	// Vertex attribute descriptors.
	MTLVertexAttributeDescriptorArray *vaDescArray = vtxDesc.attributes;
	MTLVertexAttributeDescriptor *vaDesc;
	NSUInteger vtxBuffIdx = device_driver->get_metal_buffer_index_for_vertex_attribute_binding(VERT_CONTENT_BUFFER_INDEX);
	NSUInteger vtxStride = 0;

	// Vertex location.
	vaDesc = vaDescArray[0];
	vaDesc.format = MTLVertexFormatFloat4;
	vaDesc.bufferIndex = vtxBuffIdx;
	vaDesc.offset = vtxStride;
	vtxStride += sizeof(simd::float4);

	// Vertex attribute buffer.
	MTLVertexBufferLayoutDescriptorArray *vbDescArray = vtxDesc.layouts;
	MTLVertexBufferLayoutDescriptor *vbDesc = vbDescArray[vtxBuffIdx];
	vbDesc.stepFunction = MTLVertexStepFunctionPerVertex;
	vbDesc.stepRate = 1;
	vbDesc.stride = vtxStride;

	return [device_driver->get_device() newRenderPipelineStateWithDescriptor:plDesc error:p_error];
}
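
// Returns the cached clear pipeline for p_key, creating it through the
// resource factory on a cache miss.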
id<MTLRenderPipelineState> MDResourceCache::get_clear_render_pipeline_state(ClearAttKey &p_key, NSError **p_error) {
	auto it = clear_states.find(p_key);
	if (it != clear_states.end()) {
		return it->value;
	}

	id<MTLRenderPipelineState> state = resource_factory->new_clear_pipeline_state(p_key, p_error);
	clear_states[p_key] = state;
	return state;
}

id<MTLDepthStencilState> MDResourceCache::get_depth_stencil_state(bool p_use_depth, bool p_use_stencil) {
	id<MTLDepthStencilState> __strong *val;
	if (p_use_depth && p_use_stencil) {
		val = &clear_depth_stencil_state.all;
	} else if (p_use_depth) {
		val = &clear_depth_stencil_state.depth_only;
	} else if (p_use_stencil) {
		val = &clear_depth_stencil_state.stencil_only;
	} else {
		val = &clear_depth_stencil_state.none;
	}
	DEV_ASSERT(val != nullptr);

	if (*val == nil) {
		*val = resource_factory->new_depth_stencil_state(p_use_depth, p_use_stencil);
	}
	return *val;
}

static const char *SHADER_STAGE_NAMES[] = {
	[RD::SHADER_STAGE_VERTEX] = "vert",
	[RD::SHADER_STAGE_FRAGMENT] = "frag",
	[RD::SHADER_STAGE_TESSELATION_CONTROL] = "tess_ctrl",
	[RD::SHADER_STAGE_TESSELATION_EVALUATION] = "tess_eval",
	[RD::SHADER_STAGE_COMPUTE] = "comp",
};

void ShaderCacheEntry::notify_free() const {
	owner.shader_cache_free_entry(key);
}

@interface MDLibrary ()
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry;
@end

/// Loads the MTLLibrary when the library is first accessed.
@interface MDLazyLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::shared_mutex _mu;
	bool _loaded;
	id<MTLDevice> _device;
	NSString *_source;
	MTLCompileOptions *_options;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options;
@end

/// Loads the MTLLibrary immediately on initialization, using an asynchronous API.
@interface MDImmediateLibrary : MDLibrary {
	id<MTLLibrary> _library;
	NSError *_error;
	std::mutex _cv_mutex;
	std::condition_variable _cv;
	std::atomic<bool> _complete;
	bool _ready;
}
- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options;
@end

@implementation MDLibrary

+ (instancetype)newLibraryWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options
		strategy:(ShaderLoadStrategy)strategy {
	switch (strategy) {
		case ShaderLoadStrategy::DEFAULT:
			[[fallthrough]];
		default:
			return [[MDImmediateLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
		case ShaderLoadStrategy::LAZY:
			return [[MDLazyLibrary alloc] initWithCacheEntry:entry device:device source:source options:options];
	}
}

- (id<MTLLibrary>)library {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

- (NSError *)error {
	CRASH_NOW_MSG("Not implemented");
	return nil;
}

- (void)setLabel:(NSString *)label {
}

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry {
	self = [super init];
	_entry = entry;
	_entry->library = self;
	return self;
}

- (void)dealloc {
	_entry->notify_free();
}

@end

@implementation MDImmediateLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry];
	_complete = false;
	_ready = false;

	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			entry->name.get_data(), SHADER_STAGE_NAMES[entry->stage], entry->key.short_sha());

	[device newLibraryWithSource:source
			options:options
			completionHandler:^(id<MTLLibrary> library, NSError *error) {
				os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");
				self->_library = library;
				self->_error = error;
				if (error) {
					ERR_PRINT(vformat(U"Error compiling shader %s: %s", entry->name.get_data(), error.localizedDescription.UTF8String));
				}

				{
					std::lock_guard<std::mutex> lock(self->_cv_mutex);
					self->_ready = true;
				}
				self->_cv.notify_all();
				self->_complete = true;
			}];

	return self;
}

- (id<MTLLibrary>)library {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _library;
}

- (NSError *)error {
	if (!_complete) {
		std::unique_lock<std::mutex> lock(_cv_mutex);
		_cv.wait(lock, [&] { return _ready; });
	}
	return _error;
}

@end

@implementation MDLazyLibrary

- (instancetype)initWithCacheEntry:(ShaderCacheEntry *)entry
		device:(id<MTLDevice>)device
		source:(NSString *)source
		options:(MTLCompileOptions *)options {
	self = [super initWithCacheEntry:entry];
	_device = device;
	_source = source;
	_options = options;
	return self;
}
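
// Compiles the library on first access. A shared lock covers the fast path;
// the exclusive lock re-checks _loaded so only one thread performs the
// compile, after which the compile inputs are released.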
- (void)load {
	{
		std::shared_lock<std::shared_mutex> lock(_mu);
		if (_loaded) {
			return;
		}
	}

	std::unique_lock<std::shared_mutex> lock(_mu);
	if (_loaded) {
		return;
	}

	__block os_signpost_id_t compile_id = (os_signpost_id_t)(uintptr_t)self;
	os_signpost_interval_begin(LOG_INTERVALS, compile_id, "shader_compile",
			"shader_name=%{public}s stage=%{public}s hash=%X",
			_entry->name.get_data(), SHADER_STAGE_NAMES[_entry->stage], _entry->key.short_sha());
	NSError *error = nil;
	_library = [_device newLibraryWithSource:_source options:_options error:&error];
	os_signpost_interval_end(LOG_INTERVALS, compile_id, "shader_compile");
	// Propagate the compile error; the original left _error unset, so the
	// error accessor below could never report a failure.
	_error = error;

	_device = nil;
	_source = nil;
	_options = nil;
	_loaded = true;
}

- (id<MTLLibrary>)library {
	[self load];
	return _library;
}

- (NSError *)error {
	[self load];
	return _error;
}

@end